author    phajdan.jr <phajdan.jr@chromium.org>    2015-07-09 07:41:38 -0700
committer Commit bot <commit-bot@chromium.org>    2015-07-09 14:42:24 +0000
commit    7bad5e0aa9bfbb0fa979ba94a5f92cfeeecafa8f (patch)
tree      3e8a59cc64cada70970eab821a65a05d93867eda /infra/scripts
parent    a56aa7bcb6e1872c9daac2076ab108ef8d176084 (diff)
runtest.py: remove unused code (perf etc)
BUG=507298
Review URL: https://codereview.chromium.org/1223273002
Cr-Commit-Position: refs/heads/master@{#338038}
Diffstat (limited to 'infra/scripts')
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/crash_utils.py         46
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/results_dashboard.py  393
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/runtest.py             24
3 files changed, 1 insertion(+), 462 deletions(-)
diff --git a/infra/scripts/legacy/scripts/slave/crash_utils.py b/infra/scripts/legacy/scripts/slave/crash_utils.py
deleted file mode 100755
index db9d1bd..0000000
--- a/infra/scripts/legacy/scripts/slave/crash_utils.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""A utility for crash reporting."""
-
-import os
-import time
-
-from common import chromium_utils
-
-
-def print_new_crash_files(new_crash_files):
- """Prints all the new crash files."""
- if new_crash_files:
- print '\nFound %d new crash file(s), dumping them:' % (
- len(new_crash_files))
- for crash_file in new_crash_files:
- print 'File: ' + crash_file
- print '=' * (6 + len(crash_file))
- for crash_line in open(crash_file):
- print ' ' + crash_line.rstrip()
- print ''
-
-
-def list_crash_logs():
- """List all the crash files stored in the user directory."""
- reports_dir = os.path.expanduser('~/Library/Logs/DiagnosticReports')
- result = [x for x in chromium_utils.LocateFiles('*.crash', reports_dir)]
- return result
-
-
-def wait_for_crash_logs():
- """Sleeps for a while to allow the crash logs to be written.
-
- The crash reporter runs asynchronously out of process, so when a unit test
- crashes there is no guarantee the crash log has been written yet. This method
- is a hack to allow time for the crash logs to be written. Ninety seconds was
- picked by looking at data on the bots."""
- # TODO(lakshya): Optimize by polling every 10 seconds for a crash log to be
- # available instead of waiting for 90 seconds.
- print ('\nNote: Test finished with non zero status, sleeping for 90s to '
- 'allow crash files to be written.')
- time.sleep(90)
-
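The TODO in wait_for_crash_logs suggests polling every 10 seconds rather than sleeping a flat 90. A minimal sketch of that polling approach, assuming list_crash_logs() from the deleted module above; the timeout and interval values are illustrative, not taken from this diff:

import time

def wait_for_crash_logs_polling(baseline_logs, timeout=90, interval=10):
    """Polls for new crash logs every `interval` seconds, up to `timeout`."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        # Any report not in the baseline snapshot is a new crash log.
        new_logs = set(list_crash_logs()) - set(baseline_logs)
        if new_logs:
            return sorted(new_logs)
        time.sleep(interval)
    return []

A caller would snapshot list_crash_logs() before the test run and pass that snapshot as baseline_logs.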
diff --git a/infra/scripts/legacy/scripts/slave/results_dashboard.py b/infra/scripts/legacy/scripts/slave/results_dashboard.py
deleted file mode 100755
index 9c5b5ab..0000000
--- a/infra/scripts/legacy/scripts/slave/results_dashboard.py
+++ /dev/null
@@ -1,393 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Functions for adding results to perf dashboard."""
-
-import calendar
-import datetime
-import httplib
-import json
-import os
-import urllib
-import urllib2
-
-from slave import slave_utils
-
-# The paths in the results dashboard URLs for sending and viewing results.
-SEND_RESULTS_PATH = '/add_point'
-RESULTS_LINK_PATH = '/report?masters=%s&bots=%s&tests=%s&rev=%s'
-
-# CACHE_DIR/CACHE_FILENAME will be created in options.build_dir to cache
-# results which need to be retried.
-CACHE_DIR = 'results_dashboard'
-CACHE_FILENAME = 'results_to_retry'
-
-
-def SendResults(data, url, build_dir):
- """Sends results to the Chrome Performance Dashboard.
-
- This function tries to send the given data to the dashboard, in addition to
- any data from the cache file. The cache file contains any data that wasn't
- successfully sent in a previous run.
-
- Args:
- data: The data to try to send. Must be JSON-serializable.
- url: Performance Dashboard URL (including scheme).
- build_dir: Directory in which the cache directory will be created.
- """
- results_json = json.dumps(data)
-
- # Write the new request line to the cache file, which contains all lines
- # that we shall try to send now.
- cache_file_name = _GetCacheFileName(build_dir)
- _AddLineToCacheFile(results_json, cache_file_name)
-
- # Send all the results from this run and the previous cache to the dashboard.
- fatal_error, errors = _SendResultsFromCache(cache_file_name, url)
-
- # Print out a Buildbot link annotation.
- link_annotation = _LinkAnnotation(url, data)
- if link_annotation:
- print link_annotation
-
- # Print any errors; if there was a fatal error, it should be an exception.
- for error in errors:
- print error
- if fatal_error:
- print 'Error uploading to dashboard.'
- print '@@@STEP_EXCEPTION@@@'
- return False
- return True
-
-
-def _GetCacheFileName(build_dir):
- """Gets the cache filename, creating the file if it does not exist."""
- cache_dir = os.path.join(os.path.abspath(build_dir), CACHE_DIR)
- if not os.path.exists(cache_dir):
- os.makedirs(cache_dir)
- cache_filename = os.path.join(cache_dir, CACHE_FILENAME)
- if not os.path.exists(cache_filename):
- # Create the file.
- open(cache_filename, 'wb').close()
- return cache_filename
-
-
-def _AddLineToCacheFile(line, cache_file_name):
- """Appends a line to the given file."""
- with open(cache_file_name, 'ab') as cache:
- cache.write('\n' + line)
-
-
-def _SendResultsFromCache(cache_file_name, url):
- """Tries to send each line from the cache file in a separate request.
-
- Data which fails to send is written back to the cache file for retry.
-
- Args:
- cache_file_name: A file name.
- url: Performance Dashboard URL to send the results to.
-
- Returns:
- A pair (fatal_error, errors), where fatal_error is a boolean indicating
- whether there was a major error and the step should fail, and errors
- is a list of error strings.
- """
- with open(cache_file_name, 'rb') as cache:
- cache_lines = cache.readlines()
- total_results = len(cache_lines)
-
- fatal_error = False
- errors = []
-
- lines_to_retry = []
- for index, line in enumerate(cache_lines):
- line = line.strip()
- if not line:
- continue
- print 'Sending result %d of %d to dashboard.' % (index + 1, total_results)
-
- # Check that the line that was read from the file is valid JSON. If not,
- # don't try to send it, and don't re-try it later; just print an error.
- if not _CanParseJSON(line):
- errors.append('Could not parse JSON: %s' % line)
- continue
-
- error = _SendResultsJson(url, line)
-
- # If the dashboard returned an error, we will re-try next time.
- if error:
- if 'HTTPError: 400' in error:
- # If the remote app rejects the JSON, it's probably malformed,
- # so we don't want to retry it.
- print 'Discarding JSON, error:\n%s' % error
- fatal_error = True
- break
-
- if index != len(cache_lines) - 1:
- # The very last item in the cache_lines list is the new results line.
- # If this line is not the new results line, then this results line
- # has already been tried before; now it's considered fatal.
- fatal_error = True
-
- # The lines to retry are all lines starting from the current one.
- lines_to_retry = [l.strip() for l in cache_lines[index:] if l.strip()]
- errors.append(error)
- break
-
- # Write any failing requests to the cache file.
- cache = open(cache_file_name, 'wb')
- cache.write('\n'.join(set(lines_to_retry)))
- cache.close()
-
- return fatal_error, errors
-
-
-def _CanParseJSON(my_json):
- """Returns True if the input can be parsed as JSON, False otherwise."""
- try:
- json.loads(my_json)
- except ValueError:
- return False
- return True
-
-
-def MakeListOfPoints(charts, bot, test_name, mastername, buildername,
- buildnumber, supplemental_columns):
- """Constructs a list of point dictionaries to send.
-
- The format output by this function is the original format for sending data
- to the perf dashboard. Each trace of each chart becomes one point
- dictionary in the returned list.
-
- Args:
- charts: A dictionary of chart names to chart data, as generated by the
- log processor classes (see process_log_utils.GraphingLogProcessor).
- bot: A string which comes from perf_id, e.g. linux-release.
- test_name: A test suite name, e.g. sunspider.
- mastername: Buildbot master name, e.g. chromium.perf.
- buildername: Builder name (for stdio links).
- buildnumber: Build number (for stdio links).
- supplemental_columns: A dictionary of extra data to send with a point.
-
- Returns:
- A list of dictionaries in the format accepted by the perf dashboard.
- Each dictionary has the keys "master", "bot", "test", "value", "revision".
- The full details of this format are described at http://goo.gl/TcJliv.
- """
- results = []
-
- # The master name used for the dashboard is the CamelCase name returned by
- # GetActiveMaster(), and not the canonical master name with dots.
- master = slave_utils.GetActiveMaster()
-
- for chart_name, chart_data in sorted(charts.items()):
- point_id, revision_columns = _RevisionNumberColumns(chart_data, prefix='r_')
-
- for trace_name, trace_values in sorted(chart_data['traces'].items()):
- is_important = trace_name in chart_data.get('important', [])
- test_path = _TestPath(test_name, chart_name, trace_name)
- result = {
- 'master': master,
- 'bot': bot,
- 'test': test_path,
- 'revision': point_id,
- 'masterid': mastername,
- 'buildername': buildername,
- 'buildnumber': buildnumber,
- 'supplemental_columns': {}
- }
-
- # Add the supplemental_columns values that were passed in after the
- # calculated revision column values so that these can be overwritten.
- result['supplemental_columns'].update(revision_columns)
- result['supplemental_columns'].update(supplemental_columns)
-
- result['value'] = trace_values[0]
- result['error'] = trace_values[1]
-
- # Add other properties to this result dictionary if available.
- if chart_data.get('units'):
- result['units'] = chart_data['units']
- if is_important:
- result['important'] = True
-
- results.append(result)
-
- return results
-
-
-def MakeDashboardJsonV1(chart_json, revision_data, bot, mastername,
- buildername, buildnumber, supplemental_dict, is_ref):
- """Generates Dashboard JSON in the new Telemetry format.
-
- See http://goo.gl/mDZHPl for more info on the format.
-
- Args:
- chart_json: A dict containing the telemetry output.
- revision_data: Data about revisions to include in the upload.
- bot: A string which comes from perf_id, e.g. linux-release.
- mastername: Buildbot master name, e.g. chromium.perf.
- buildername: Builder name (for stdio links).
- buildnumber: Build number (for stdio links).
- supplemental_dict: A dictionary of extra data to send with a point.
- is_ref: True if this is a reference build, False otherwise.
-
- Returns:
- A dictionary in the format accepted by the perf dashboard.
- """
- if not chart_json:
- print 'Error: No json output from telemetry.'
- print '@@@STEP_FAILURE@@@'
- # The master name used for the dashboard is the CamelCase name returned by
- # GetActiveMaster(), and not the canonical master name with dots.
- master = slave_utils.GetActiveMaster()
- point_id, revision_columns = _RevisionNumberColumns(revision_data, prefix='')
- supplemental_columns = {}
- for key in supplemental_dict:
- supplemental_columns[key.replace('a_', '', 1)] = supplemental_dict[key]
- fields = {
- 'master': master,
- 'bot': bot,
- 'masterid': mastername,
- 'buildername': buildername,
- 'buildnumber': buildnumber,
- 'point_id': point_id,
- 'supplemental': supplemental_columns,
- 'versions': revision_columns,
- 'chart_data': chart_json,
- 'is_ref': is_ref,
- }
- return fields
-
-
-def _GetTimestamp():
- """Get the Unix timestamp for the current time."""
- return int(calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
-
-
-def _RevisionNumberColumns(data, prefix):
- """Get the revision number and revision-related columns from the given data.
-
- Args:
- data: A dict of information from one line of the log file.
- prefix: Prefix for revision type keys. 'r_' for non-telemetry json, '' for
- telemetry json.
-
- Returns:
- A tuple with the point id (which must be an int), and a dict of
- revision-related columns.
- """
- revision_supplemental_columns = {}
-
- # The dashboard requires points' x-values to be integers, and points are
- # ordered by these x-values. If data['rev'] can't be parsed as an int, assume
- # that it's a git commit hash and use timestamp as the x-value.
- try:
- revision = int(data['rev'])
- if revision and revision > 300000 and revision < 1000000:
- # Revision is the commit pos.
- # TODO(sullivan,qyearsley): use got_revision_cp when available.
- revision_supplemental_columns[prefix + 'commit_pos'] = revision
- except ValueError:
- # The dashboard requires ordered integer revision numbers. If the revision
- # is not an integer, assume it's a git hash and send a timestamp.
- revision = _GetTimestamp()
- revision_supplemental_columns[prefix + 'chromium'] = data['rev']
-
- # For other revision data, add it if it's present and not undefined:
- for key in ['webkit_rev', 'webrtc_rev', 'v8_rev']:
- if key in data and data[key] != 'undefined':
- revision_supplemental_columns[prefix + key] = data[key]
-
- # If possible, also send the git hash.
- if 'git_revision' in data and data['git_revision'] != 'undefined':
- revision_supplemental_columns[prefix + 'chromium'] = data['git_revision']
-
- return revision, revision_supplemental_columns
-
-
-def _TestPath(test_name, chart_name, trace_name):
- """Get the slash-separated test path to send.
-
- Args:
- test_name: Test name. Typically, this will be a top-level 'test suite' name.
- chart_name: Name of a chart where multiple trace lines are grouped. If the
- chart name is the same as the trace name, that signifies that this is
- the main trace for the chart.
- trace_name: The "trace name" is the name of an individual line on chart.
-
- Returns:
- A slash-separated list of names that corresponds to the hierarchy of test
- data in the Chrome Performance Dashboard; doesn't include master or bot
- name.
- """
- # For tests run on reference builds by builds/scripts/slave/telemetry.py,
- # "_ref" is appended to the trace name. On the dashboard, as long as the
- # result is on the right chart, it can just be called "ref".
- if trace_name == chart_name + '_ref':
- trace_name = 'ref'
- chart_name = chart_name.replace('_by_url', '')
-
- # No slashes are allowed in the trace name.
- trace_name = trace_name.replace('/', '_')
-
- # The results for "test/chart" and "test/chart/*" will all be shown on the
- # same chart by the dashboard. The result with path "test/chart" is considered
- # the main trace for the chart.
- test_path = '%s/%s/%s' % (test_name, chart_name, trace_name)
- if chart_name == trace_name:
- test_path = '%s/%s' % (test_name, chart_name)
- return test_path
-
-
-def _SendResultsJson(url, results_json):
- """Make a HTTP POST with the given JSON to the Performance Dashboard.
-
- Args:
- url: URL of Performance Dashboard instance, e.g.
- "https://chromeperf.appspot.com".
- results_json: JSON string that contains the data to be sent.
-
- Returns:
- None if successful, or an error string if there were errors.
- """
- # When data is provided to urllib2.Request, a POST is sent instead of a GET.
- # The data must be in the application/x-www-form-urlencoded format.
- data = urllib.urlencode({'data': results_json})
- req = urllib2.Request(url + SEND_RESULTS_PATH, data)
- try:
- urllib2.urlopen(req)
- except urllib2.HTTPError as e:
- return ('HTTPError: %d. Response: %s\n'
- 'JSON: %s\n' % (e.code, e.read(), results_json))
- except urllib2.URLError as e:
- return 'URLError: %s for JSON %s\n' % (str(e.reason), results_json)
- except httplib.HTTPException as e:
- return 'HTTPException for JSON %s\n' % results_json
- return None
-
-
-def _LinkAnnotation(url, data):
- """Prints a link annotation with a link to the dashboard if possible.
-
- Args:
- url: The Performance Dashboard URL, e.g. "https://chromeperf.appspot.com"
- data: The data that's being sent to the dashboard.
-
- Returns:
- An annotation to print, or None.
- """
- if not data:
- return None
- if isinstance(data, list):
- master, bot, test, revision = (
- data[0]['master'], data[0]['bot'], data[0]['test'], data[0]['revision'])
- else:
- master, bot, test, revision = (
- data['master'], data['bot'], data['chart_data']['benchmark_name'],
- data['point_id'])
- results_link = url + RESULTS_LINK_PATH % (
- urllib.quote(master), urllib.quote(bot), urllib.quote(test.split('/')[0]),
- revision)
- return '@@@STEP_LINK@%s@%s@@@' % ('Results Dashboard', results_link)
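For context on what is being deleted, here is a hedged sketch of how this module was typically driven end to end; the chart data, the perf_id value 'linux-release', and the builder details are invented for illustration and only mirror the shapes described in the MakeListOfPoints docstring:

from slave import results_dashboard  # the module deleted above

# GraphingLogProcessor-shaped chart data: each trace maps to [value, error],
# and 'rev' supplies the point's integer x-value (here, a commit position).
charts = {
    'sunspider': {
        'traces': {'sunspider': [102.5, 1.2]},
        'units': 'ms',
        'rev': '338037',
    },
}
points = results_dashboard.MakeListOfPoints(
    charts, bot='linux-release', test_name='sunspider',
    mastername='chromium.perf', buildername='Linux Tests',
    buildnumber=42, supplemental_columns={})
# SendResults first appends the new JSON to the on-disk retry cache, then
# flushes the whole cache, so uploads that failed in earlier runs are retried.
results_dashboard.SendResults(
    points, 'https://chromeperf.appspot.com', build_dir='out')

Each point gets a test path such as 'sunspider/sunspider'; per _TestPath, the trace segment is dropped when it matches the chart name, which marks it as the chart's main trace.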
diff --git a/infra/scripts/legacy/scripts/slave/runtest.py b/infra/scripts/legacy/scripts/slave/runtest.py
index 517d763..fac25b9 100755
--- a/infra/scripts/legacy/scripts/slave/runtest.py
+++ b/infra/scripts/legacy/scripts/slave/runtest.py
@@ -36,9 +36,7 @@ import config
from slave import annotation_utils
from slave import build_directory
-from slave import crash_utils
from slave import gtest_slave_utils
-from slave import results_dashboard
from slave import slave_utils
from slave import xvfb
@@ -441,9 +439,7 @@ def _Main(options, args, extra_env):
return 1
if options.annotate:
- annotation_utils.annotate(
- options.test_type, result, log_processor,
- perf_dashboard_id=options.perf_dashboard_id)
+ annotation_utils.annotate(options.test_type, result, log_processor)
return result
@@ -681,9 +677,6 @@ def main():
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
- if not options.perf_dashboard_id:
- options.perf_dashboard_id = options.factory_properties.get('test_name')
-
options.test_type = options.test_type or options.factory_properties.get(
'step_name', '')
@@ -723,21 +716,6 @@ def main():
extra_env['GTEST_TOTAL_SHARDS'] = str(options.total_shards)
extra_env['GTEST_SHARD_INDEX'] = str(options.shard_index - 1)
- # If perf config is passed via command line, parse the string into a dict.
- if options.perf_config:
- try:
- options.perf_config = ast.literal_eval(options.perf_config)
- assert type(options.perf_config) is dict, (
- 'Value of --perf-config couldn\'t be evaluated into a dict.')
- except (exceptions.SyntaxError, ValueError):
- option_parser.error('Failed to parse --perf-config value into a dict: '
- '%s' % options.perf_config)
- return 1
-
- # Allow factory property 'perf_config' as well during a transition period.
- options.perf_config = (options.perf_config or
- options.factory_properties.get('perf_config'))
-
if options.results_directory:
options.test_output_xml = os.path.normpath(os.path.abspath(os.path.join(
options.results_directory, '%s.xml' % options.test_type)))
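The --perf-config block removed above parsed the flag's string value into a dict with ast.literal_eval. A standalone sketch of that pattern (the example flag value is invented):

import ast

def parse_perf_config(raw):
    """Safely evaluates a dict literal passed on the command line."""
    try:
        config = ast.literal_eval(raw)
    except (SyntaxError, ValueError):
        raise ValueError('Failed to parse --perf-config value into a dict: %s' % raw)
    if not isinstance(config, dict):
        raise ValueError("Value of --perf-config couldn't be evaluated into a dict.")
    return config

parse_perf_config("{'a_default_rev': 'r_chromium'}")  # -> {'a_default_rev': 'r_chromium'}

Unlike eval, ast.literal_eval only accepts Python literals, so a hostile flag value cannot execute arbitrary code; that is presumably why the removed code used it.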