path: root/build/android/pylib/test_options_parser.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Parses options for the instrumentation tests."""

import optparse
import os

import constants


def CreateTestRunnerOptionParser(usage=None, default_timeout=60):
  """Returns a new OptionParser with arguments applicable to all tests."""
  option_parser = optparse.OptionParser(usage=usage)
  option_parser.add_option('-t', dest='timeout',
                           help='Timeout to wait for each test',
                           type='int',
                           default=default_timeout)
  option_parser.add_option('-c', dest='cleanup_test_files',
                           help='Clean up test files on the device after the run',
                           action='store_true')
  option_parser.add_option('-v',
                           '--verbose',
                           dest='verbose_count',
                           default=0,
                           action='count',
                           help='Verbose level (multiple times for more)')
  profilers = ['activitymonitor', 'chrometrace', 'dumpheap', 'smaps',
               'traceview']
  option_parser.add_option('--profiler', dest='profilers', action='append',
                           choices=profilers,
                           help='Profiling tool to run during test. '
                           'Pass multiple times to run multiple profilers. '
                           'Available profilers: %s' % profilers)
  option_parser.add_option('--tool',
                           dest='tool',
                           help='Run the test under a tool '
                           '(use --tool help to list them)')
  return option_parser
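
# Example usage (illustrative sketch only, not part of the original module):
# a runner that only needs the shared options can extend the parser returned
# above.  '--extra-flag' below is a hypothetical option a caller might add.
#
#   parser = CreateTestRunnerOptionParser(usage='usage: %prog [options]',
#                                         default_timeout=120)
#   parser.add_option('--extra-flag', action='store_true')
#   options, _ = parser.parse_args()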


def ParseInstrumentationArgs(args):
  """Parse arguments and return options with defaults."""

  option_parser = CreateTestRunnerOptionParser()
  option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
                           action='store_true', help='Wait for debugger.')
  option_parser.add_option('-I', dest='install_apk', help='Install APK.',
                           action='store_true')
  option_parser.add_option('-f', '--test_filter',
                           help='Test filter (if not fully qualified, '
                           'will run all matches).')
  option_parser.add_option('-A', '--annotation', dest='annotation_str',
                           help=('Run only tests with any of the given '
                                 'annotations. '
                                 'An annotation can be either a key or a '
                                 'key-values pair. '
                                 'A test that has no annotation is '
                                 'considered "SmallTest".'))
  option_parser.add_option('-j', '--java_only', action='store_true',
                           help='Run only the Java tests.')
  option_parser.add_option('-p', '--python_only', action='store_true',
                           help='Run only the Python tests.')
  option_parser.add_option('-n', '--run_count', type='int',
                           dest='number_of_runs', default=1,
                           help=('How many times to run each test, regardless '
                                 'of the result. (Default is 1)'))
  option_parser.add_option('--test-apk', dest='test_apk',
                           help=('The name of the apk containing the tests '
                                 '(without the .apk extension).'))
  option_parser.add_option('--screenshot', dest='screenshot_failures',
                           action='store_true',
                           help='Capture screenshots of test failures')
  option_parser.add_option('--save-perf-json', action='store_true',
                           help='Saves the JSON file for each UI Perf test.')
  option_parser.add_option('--shard_retries', type='int', default=1,
                           help=('Number of times to retry each failure when '
                                 'sharding.'))
  option_parser.add_option('--official-build', action='store_true',
                           help='Run official build tests.')
  option_parser.add_option('--device',
                           help='Serial number of the device to use.')
  option_parser.add_option('--python_test_root',
                           help='Root of the python-driven tests.')

  options, args = option_parser.parse_args(args)
  if len(args) > 1:
    option_parser.error('Unknown arguments: %s' % args[1:])
  if options.java_only and options.python_only:
    option_parser.error('Options java_only (-j) and python_only (-p) '
                        'are mutually exclusive')

  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

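  # Derive the test apk and its jar locations from the hard-coded out/Release
  # output directory of the Chromium checkout.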
  options.test_apk_path = os.path.join(constants.CHROME_DIR,
                                       'out', 'Release',
                                       '%s.apk' % options.test_apk)
  options.test_apk_jar_path = os.path.join(constants.CHROME_DIR,
                                           'out', 'Release',
                                           '%s.jar' % options.test_apk)
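
  # Annotation filter: an explicit -A list wins; an explicit test filter
  # disables annotation filtering; otherwise fall back to the default set.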
  if options.annotation_str:
    options.annotation = options.annotation_str.split()
  elif options.test_filter:
    options.annotation = []
  else:
    options.annotation = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest']

  return options
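

if __name__ == '__main__':
  # Illustrative smoke test only (not part of the original module).  It feeds
  # an argv-style list through ParseInstrumentationArgs and prints a few of
  # the derived settings.  'FooTest' is a made-up apk name, and the path
  # fields only resolve sensibly inside a Chromium checkout.
  example_argv = ['test_options_parser.py', '--test-apk', 'FooTest',
                  '-A', 'SmallTest', '-v']
  example_options = ParseInstrumentationArgs(example_argv)
  print('timeout:          %d' % example_options.timeout)
  print('annotations:      %s' % example_options.annotation)
  print('test_apk_path:    %s' % example_options.test_apk_path)
  print('run_java_tests:   %s' % example_options.run_java_tests)
  print('run_python_tests: %s' % example_options.run_python_tests)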