# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs a perf test on a single device.

Our buildbot infrastructure requires each slave to run steps serially.
This is sub-optimal for Android, where these steps can run independently on
multiple connected devices.

The buildbots will run this script multiple times per cycle:
- First: all steps listed in --steps will be executed in parallel using all
connected devices. Step results will be pickled to disk. Each step has a unique
name. The result code will be ignored if the step name is listed in
--flaky-steps.
The buildbot will treat this step as a regular step, and will not process any
graph data.

- Then, with --print-step STEP_NAME: at this stage, we simply print the step
results previously saved for STEP_NAME. The buildbot will then process the
graph data accordingly.

The JSON steps file contains a dictionary in the format:
{
  "step_name_foo": "script_to_execute foo",
  "step_name_bar": "script_to_execute bar"
}

The JSON flaky steps file contains a list of step names whose results should
be ignored:
[
  "step_name_foo",
  "step_name_bar"
]

Note that each script_to_execute must accept at least the following
options:
  --device: the serial number to be passed to all adb commands.
  --keep_test_server_ports: indicates it's being run as a shard, and shouldn't
  reset test server port allocation.
"""

import datetime
import logging
import os
import pexpect
import pickle
import sys
import time

from pylib import constants
from pylib.base import base_test_result
from pylib.base import base_test_runner


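# Directory where each step's pickled result is written and later read back
# by PrintTestOutput.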
_OUTPUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out', 'step_results')


def PrintTestOutput(test_name):
  """Helper method to print the output of previously executed test_name.

  Args:
    test_name: name of the test that has been previously executed.

  Returns:
    exit code generated by the test step.
  """
  file_name = os.path.join(_OUTPUT_DIR, test_name)
  if not os.path.exists(file_name):
    logging.error('File not found %s', file_name)
    return 1

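  # Load the dict that TestRunner._SaveResult pickled for this step; it holds
  # the step's output and exit code.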
  with open(file_name, 'r') as f:
    persisted_result = pickle.loads(f.read())
  print persisted_result['output']

  return persisted_result['exit_code']


class TestRunner(base_test_runner.BaseTestRunner):
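  """Runs perf test steps on a single device and persists their results."""
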
  def __init__(self, test_options, device, tests, flaky_tests):
    """A TestRunner instance runs a perf test on a single device.

    Args:
      test_options: A PerfOptions object.
      device: Serial number of the device on which to run the tests.
      tests: a dict mapping test_name to command.
      flaky_tests: a list of flaky test_name.
    """
    super(TestRunner, self).__init__(device, None, 'Release')
    self._options = test_options
    self._tests = tests
    self._flaky_tests = flaky_tests

  @staticmethod
  def _SaveResult(result):
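    """Pickles a result dict to a file named after its step."""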
    with open(os.path.join(_OUTPUT_DIR, result['name']), 'w') as f:
      f.write(pickle.dumps(result))

  def _LaunchPerfTest(self, test_name):
    """Runs a perf test.

    Args:
      test_name: the name of the test to be executed.

    Returns:
      A tuple of (output, base_test_result.ResultType).
    """
    cmd = ('%s --device %s --keep_test_server_ports' %
           (self._tests[test_name], self.device))
    start_time = datetime.datetime.now()
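    # pexpect.run tees the command's output to stdout and, with
    # withexitstatus=True, also returns the exit status; the step is given
    # at most 30 minutes (timeout=1800 seconds) to finish.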
    output, exit_code = pexpect.run(
        cmd, cwd=os.path.abspath(constants.DIR_SOURCE_ROOT),
        withexitstatus=True, logfile=sys.stdout, timeout=1800,
        env=os.environ)
    end_time = datetime.datetime.now()
    logging.info('%s : exit_code=%d in %d secs at %s',
                 test_name, exit_code, (end_time - start_time).seconds,
                 self.device)
    result_type = base_test_result.ResultType.FAIL
    if exit_code == 0:
      result_type = base_test_result.ResultType.PASS
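    # The result code of steps listed in --flaky-steps is ignored: report a
    # passing exit code and result type instead.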
    if test_name in self._flaky_tests:
      exit_code = 0
      result_type = base_test_result.ResultType.PASS

    persisted_result = {
        'name': test_name,
        'output': output,
        'exit_code': exit_code,
        'result_type': result_type,
        'total_time': (end_time - start_time).seconds,
        'device': self.device,
    }
    self._SaveResult(persisted_result)

    return (output, result_type)

  def RunTest(self, test_name):
    """Run a perf test on the device.

    Args:
      test_name: The name of the step to run; it is also used to report the
      result.

    Returns:
      A tuple of (TestRunResults, retry).
    """
    output, result_type = self._LaunchPerfTest(test_name)
    results = base_test_result.TestRunResults()
    results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
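    # If the step did not pass, return its name as the retry value.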
    retry = None
    if not results.DidRunPass():
      retry = test_name
    return results, retry