author     phajdan.jr <phajdan.jr@chromium.org>      2015-07-03 01:51:51 -0700
committer  Commit bot <commit-bot@chromium.org>      2015-07-03 08:52:39 +0000
commit     48317c0b5849812b84d8a2116bbddac69b0ddc68 (patch)
tree       95510c944310423369320c36e7d00758e3862c1f /infra/scripts
parent     ceced0d4f959e8b2046bcc80fd7a72b1784c758c (diff)
Fork runtest.py and everything it needs src-side for easier hacking
BUG=506498
Review URL: https://codereview.chromium.org/1213433006
Cr-Commit-Position: refs/heads/master@{#337295}
Diffstat (limited to 'infra/scripts')
-rw-r--r--  infra/scripts/legacy/scripts/common/__init__.py | 0
-rw-r--r--  infra/scripts/legacy/README | 20
-rw-r--r--  infra/scripts/legacy/scripts/common/chromium_utils.py | 1914
-rwxr-xr-x  infra/scripts/legacy/scripts/common/env.py | 439
-rwxr-xr-x  infra/scripts/legacy/scripts/common/gtest_utils.py | 659
-rw-r--r--  infra/scripts/legacy/scripts/common/url_helper.py | 60
-rw-r--r--  infra/scripts/legacy/scripts/slave/__init__.py | 0
-rw-r--r--  infra/scripts/legacy/scripts/slave/annotation_utils.py | 127
-rw-r--r--  infra/scripts/legacy/scripts/slave/bootstrap.py | 47
-rw-r--r--  infra/scripts/legacy/scripts/slave/build_directory.py | 111
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/crash_utils.py | 46
-rw-r--r--  infra/scripts/legacy/scripts/slave/gtest/__init__.py | 0
-rw-r--r--  infra/scripts/legacy/scripts/slave/gtest/json_results_generator.py | 255
-rw-r--r--  infra/scripts/legacy/scripts/slave/gtest/networktransaction.py | 46
-rw-r--r--  infra/scripts/legacy/scripts/slave/gtest/test_result.py | 42
-rw-r--r--  infra/scripts/legacy/scripts/slave/gtest/test_results_uploader.py | 32
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/gtest_slave_utils.py | 273
-rw-r--r--  infra/scripts/legacy/scripts/slave/performance_log_processor.py | 844
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/results_dashboard.py | 393
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/runisolatedtest.py | 169
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/runtest.py | 1948
-rw-r--r--  infra/scripts/legacy/scripts/slave/slave_utils.py | 735
-rwxr-xr-x  infra/scripts/legacy/scripts/slave/telemetry_utils.py | 114
-rw-r--r--  infra/scripts/legacy/scripts/slave/xvfb.py | 138
-rw-r--r--  infra/scripts/legacy/site_config/config.py | 32
-rw-r--r--  infra/scripts/legacy/site_config/config_bootstrap.py | 128
-rw-r--r--  infra/scripts/legacy/site_config/config_default.py | 230
-rwxr-xr-x  infra/scripts/runtest_wrapper.py | 24
28 files changed, 8820 insertions, 6 deletions
diff --git a/infra/scripts/legacy/README b/infra/scripts/legacy/README
new file mode 100644
index 0000000..a6b1e9e
--- /dev/null
+++ b/infra/scripts/legacy/README
@@ -0,0 +1,20 @@
+This directory contains files copied from the build repo:
+ - https://chromium.googlesource.com/chromium/tools/build.git
+ - svn://svn.chromium.org/chrome/trunk/tools/build
+
+The revision is:
+ - 353c0efac75463c521763f601bcbdfffc8b291a8 (git)
+ - 295912 (svn)
+
+This exists because src-side code can be easily tested on trybots
+before committing, as opposed to build-side code.
+
+Unfortunately, build-side code corresponding to this directory still
+exists, so it's a fork.
+
+The ultimate goal is to get rid of this code. No new pieces of infrastructure
+should be using it. It's only here for already existing pieces that we need
+to keep running.
+
+Please see https://code.google.com/p/chromium/issues/detail?id=506498
+for more context.
diff --git a/infra/scripts/legacy/scripts/common/__init__.py b/infra/scripts/legacy/scripts/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/infra/scripts/legacy/scripts/common/__init__.py
diff --git a/infra/scripts/legacy/scripts/common/chromium_utils.py b/infra/scripts/legacy/scripts/common/chromium_utils.py
new file mode 100644
index 0000000..b41cd44
--- /dev/null
+++ b/infra/scripts/legacy/scripts/common/chromium_utils.py
@@ -0,0 +1,1914 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Set of basic operations/utilities that are used by the build. """
+
+from contextlib import contextmanager
+import ast
+import base64
+import cStringIO
+import copy
+import errno
+import fnmatch
+import glob
+import math
+import multiprocessing
+import os
+import re
+import shutil
+import socket
+import stat
+import string # pylint: disable=W0402
+import subprocess
+import sys
+import threading
+import time
+import traceback
+import urllib
+import zipfile
+import zlib
+
+try:
+ import json # pylint: disable=F0401
+except ImportError:
+ import simplejson as json
+
+from common import env
+
+
+BUILD_DIR = os.path.realpath(os.path.join(
+ os.path.dirname(__file__), os.pardir, os.pardir))
+
+
+WIN_LINK_FUNC = None
+try:
+ if sys.platform.startswith('win'):
+ import ctypes
+ # There's 4 possibilities on Windows for links:
+ # 1. Symbolic file links;
+ # 2. Symbolic directory links;
+ # 3. Hardlinked files;
+ # 4. Junctioned directories.
+ # (Hardlinked directories don't really exist.)
+ #
+ # 7-Zip does not handle symbolic file links as we want (it puts the
+ # content of the link, not what it refers to, and reports "CRC Error" on
+ # extraction). It does work as expected for symbolic directory links.
+ # Because the majority of the large files are in the root of the staging
+ # directory, we do however need to handle file links, so we do this with
+ # hardlinking. Junctioning requires a huge whack of code, so we take the
+ # slightly odd tactic of using #2 and #3, but not #1 and #4. That is,
+ # hardlinks for files, but symbolic links for directories.
+ def _WIN_LINK_FUNC(src, dst):
+ print 'linking %s -> %s' % (src, dst)
+ if os.path.isdir(src):
+ if not ctypes.windll.kernel32.CreateSymbolicLinkA(
+ str(dst), str(os.path.abspath(src)), 1):
+ raise ctypes.WinError()
+ else:
+ if not ctypes.windll.kernel32.CreateHardLinkA(str(dst), str(src), 0):
+ raise ctypes.WinError()
+ WIN_LINK_FUNC = _WIN_LINK_FUNC
+except ImportError:
+ # If we don't have ctypes or aren't on Windows, leave WIN_LINK_FUNC as None.
+ pass
+
+
+# Wrapper around git that enforces a timeout.
+GIT_BIN = os.path.join(BUILD_DIR, 'scripts', 'tools', 'git-with-timeout')
+
+# Wrapper around svn that enforces a timeout.
+SVN_BIN = os.path.join(BUILD_DIR, 'scripts', 'tools', 'svn-with-timeout')
+
+# The Google Storage metadata key for the full commit position
+GS_COMMIT_POSITION_KEY = 'Cr-Commit-Position'
+# The Google Storage metadata key for the commit position number
+GS_COMMIT_POSITION_NUMBER_KEY = 'Cr-Commit-Position-Number'
+# The Google Storage metadata key for the Git commit hash
+GS_GIT_COMMIT_KEY = 'Cr-Git-Commit'
+
+# Regular expression to identify a Git hash
+GIT_COMMIT_HASH_RE = re.compile(r'[a-zA-Z0-9]{40}')
+#
+# Regular expression to parse a commit position
+COMMIT_POSITION_RE = re.compile(r'([^@]+)@{#(\d+)}')
+
+# Local errors.
+class MissingArgument(Exception):
+ pass
+class PathNotFound(Exception):
+ pass
+class ExternalError(Exception):
+ pass
+class NoIdentifiedRevision(Exception):
+ pass
+
+def IsWindows():
+ return sys.platform == 'cygwin' or sys.platform.startswith('win')
+
+def IsLinux():
+ return sys.platform.startswith('linux')
+
+def IsMac():
+ return sys.platform.startswith('darwin')
+
+# For chromeos we need to end up with a different platform name, but the
+# scripts use values like sys.platform for both the build target and
+# the running OS, so this gives us a back door that can be hit to
+# force different naming than the default for some of the chromeos build
+# steps.
+override_platform_name = None
+
+
+def OverridePlatformName(name):
+ """Sets the override for PlatformName()"""
+ global override_platform_name
+ override_platform_name = name
+
+
+def PlatformName():
+ """Return a string to be used in paths for the platform."""
+ if override_platform_name:
+ return override_platform_name
+ if IsWindows():
+ return 'win32'
+ if IsLinux():
+ return 'linux'
+ if IsMac():
+ return 'mac'
+ raise NotImplementedError('Unknown platform "%s".' % sys.platform)
+
+
+# Name of the file (inside the packaged build) containing revision number
+# of that build. Also used for determining the latest packaged build.
+FULL_BUILD_REVISION_FILENAME = 'FULL_BUILD_REVISION'
+
+def IsGitCommit(value):
+ """Returns: If a value is a Git commit hash.
+
+ This only works on full Git commit hashes. A value qualifies as a Git commit
+ hash if it only contains hexadecimal numbers and is forty characters long.
+ """
+ if value is None:
+ return False
+ return GIT_COMMIT_HASH_RE.match(str(value)) is not None
+
+
+# GetParentClass allows a class instance to find its parent class using Python's
+# inspect module. This allows a class instantiated from a module to access
+# their parent class's methods even after the containing module has been
+# re-imported and reloaded.
+#
+# Also see:
+# http://code.google.com/p/chromium/issues/detail?id=34089
+# http://atlee.ca/blog/2008/11/21/python-reload-danger-here-be-dragons/
+#
+def GetParentClass(obj, n=1):
+ import inspect
+ if inspect.isclass(obj):
+ return inspect.getmro(obj)[n]
+ else:
+ return inspect.getmro(obj.__class__)[n]
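For illustration only (hypothetical classes, assuming the function above is in scope), the MRO index selects the parent:

  class Base(object):
    pass

  class Child(Base):
    pass

  # inspect.getmro(Child) is (Child, Base, object), so index 1 is the parent.
  assert GetParentClass(Child) is Base
  assert GetParentClass(Child(), n=2) is object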
+
+
+def MeanAndStandardDeviation(data):
+ """Calculates mean and standard deviation for the values in the list.
+
+ Args:
+ data: list of numbers
+
+ Returns:
+ Mean and standard deviation for the numbers in the list.
+ """
+ n = len(data)
+ if n == 0:
+ return 0.0, 0.0
+ mean = float(sum(data)) / n
+ variance = sum([(element - mean)**2 for element in data]) / n
+ return mean, math.sqrt(variance)
+
+
+def FilteredMeanAndStandardDeviation(data):
+ """Calculates mean and standard deviation for the values in the list
+ ignoring first occurence of max value (unless there was only one sample).
+
+ Args:
+ data: list of numbers
+
+ Returns:
+    Mean and standard deviation for the numbers in the list, ignoring the
+    first occurrence of the max value.
+ """
+
+ def _FilterMax(array):
+ new_array = copy.copy(array) # making sure we are not creating side-effects
+ if len(new_array) != 1:
+ new_array.remove(max(new_array))
+ return new_array
+ return MeanAndStandardDeviation(_FilterMax(data))
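A small illustrative sketch (sample numbers invented) of how the filtered variant drops only the first occurrence of the maximum:

  data = [10.0, 11.0, 50.0, 12.0]
  print MeanAndStandardDeviation(data)          # includes the 50.0 outlier
  print FilteredMeanAndStandardDeviation(data)  # computed over [10.0, 11.0, 12.0]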
+
+def HistogramPercentiles(histogram, percentiles):
+ if not 'buckets' in histogram or not 'count' in histogram:
+ return []
+ computed_percentiles = _ComputePercentiles(histogram['buckets'],
+ histogram['count'],
+ percentiles)
+ output = []
+ for p in computed_percentiles:
+ output.append({'percentile': p, 'value': computed_percentiles[p]})
+ return output
+
+def GeomMeanAndStdDevFromHistogram(histogram):
+ if not 'buckets' in histogram:
+ return 0.0, 0.0
+ count = 0
+ sum_of_logs = 0
+ for bucket in histogram['buckets']:
+ if 'high' in bucket:
+ bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
+ else:
+ bucket['mean'] = bucket['low']
+ if bucket['mean'] > 0:
+ sum_of_logs += math.log(bucket['mean']) * bucket['count']
+ count += bucket['count']
+
+ if count == 0:
+ return 0.0, 0.0
+
+ sum_of_squares = 0
+ geom_mean = math.exp(sum_of_logs / count)
+ for bucket in histogram['buckets']:
+ if bucket['mean'] > 0:
+ sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
+ return geom_mean, math.sqrt(sum_of_squares / count)
+
+def _LinearInterpolate(x0, target, x1, y0, y1):
+ """Perform linear interpolation to estimate an intermediate value.
+
+  We assume for some F, F(x0) == y0, and F(x1) == y1.
+
+ We return an estimate for what F(target) should be, using linear
+ interpolation.
+
+ Args:
+ x0: (Float) A location at which some function F() is known.
+ target: (Float) A location at which we need to estimate F().
+ x1: (Float) A second location at which F() is known.
+ y0: (Float) The value of F(x0).
+ y1: (Float) The value of F(x1).
+
+ Returns:
+ (Float) The estimated value of F(target).
+ """
+ if x0 == x1:
+ return (y0 + y1) / 2
+ return (y1 - y0) * (target - x0) / (x1 - x0) + y0
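A worked example with made-up values: if F(0) = 100 and F(10) = 200, the linear estimate at target 5 is 150.

  # (200.0 - 100.0) * (5.0 - 0.0) / (10.0 - 0.0) + 100.0 == 150.0
  print _LinearInterpolate(0.0, 5.0, 10.0, 100.0, 200.0)  # 150.0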
+
+def _BucketInterpolate(last_percentage, target, next_percentage, bucket_min,
+ bucket_max):
+ """Estimate a minimum which should have the target % of samples below it.
+
+ We do linear interpolation only if last_percentage and next_percentage are
+ adjacent, and hence we are in a linear section of a histogram. Once they
+ spread further apart we generally get exponentially broader buckets, and we
+ need to interpolate in the log domain (and exponentiate our result).
+
+ Args:
+ last_percentage: (Float) This is the percentage of samples below bucket_min.
+ target: (Float) A percentage for which we need an estimated bucket.
+ next_percentage: (Float) This is the percentage of samples below bucket_max.
+ bucket_min: (Float) This is the lower value for samples in a bucket.
+ bucket_max: (Float) This exceeds the upper value for samples.
+
+ Returns:
+ (Float) An estimate of what bucket cutoff would have probably had the target
+ percentage.
+ """
+ log_domain = False
+ if bucket_min + 1.5 < bucket_max and bucket_min > 0:
+ log_domain = True
+ bucket_min = math.log(bucket_min)
+ bucket_max = math.log(bucket_max)
+ result = _LinearInterpolate(
+ last_percentage, target, next_percentage, bucket_min, bucket_max)
+ if log_domain:
+ result = math.exp(result)
+ return result
+
+def _ComputePercentiles(buckets, total, percentiles):
+ """Compute percentiles for the given histogram.
+
+  Returns estimates for the bucket cutoffs that would probably have the target
+ percentiles.
+
+ Args:
+ buckets: (List) A list of buckets representing the histogram to analyze.
+ total: (Float) The total number of samples in the histogram.
+ percentiles: (Tuple) The percentiles we are interested in.
+
+ Returns:
+ (Dictionary) Map from percentiles to bucket cutoffs.
+ """
+ if not percentiles:
+ return {}
+ current_count = 0
+ current_percentage = 0
+ next_percentile_index = 0
+ result = {}
+ for bucket in buckets:
+ if bucket['count'] > 0:
+ current_count += bucket['count']
+ old_percentage = current_percentage
+ current_percentage = float(current_count) / total
+
+ # Check whether we passed one of the percentiles we're interested in.
+ while (next_percentile_index < len(percentiles) and
+ current_percentage > percentiles[next_percentile_index]):
+ if not 'high' in bucket:
+ result[percentiles[next_percentile_index]] = bucket['low']
+ else:
+ result[percentiles[next_percentile_index]] = float(_BucketInterpolate(
+ old_percentage, percentiles[next_percentile_index],
+ current_percentage, bucket['low'], bucket['high']))
+ next_percentile_index += 1
+ return result
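An illustrative call on a made-up two-bucket histogram, going through HistogramPercentiles above; the 25th percentile lands in the first (linear) bucket, the 75th is interpolated in the log domain:

  histogram = {'count': 100, 'buckets': [
      {'low': 1, 'high': 2, 'count': 50},
      {'low': 2, 'high': 4, 'count': 50},
  ]}
  print HistogramPercentiles(histogram, [0.25, 0.75])
  # approximately: [{'percentile': 0.25, 'value': 1.5},
  #                 {'percentile': 0.75, 'value': 2.83}]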
+
+class InitializePartiallyWithArguments:
+ # pylint: disable=old-style-class
+ """Function currying implementation.
+
+ Works for constructors too. Primary use is to be able to construct a class
+ with some constructor arguments beings set ahead of actual initialization.
+ Copy of an ASPN cookbook (#52549).
+ """
+
+ def __init__(self, clazz, *args, **kwargs):
+ self.clazz = clazz
+ self.pending = args[:]
+ self.kwargs = kwargs.copy()
+
+ def __call__(self, *args, **kwargs):
+ if kwargs and self.kwargs:
+ kw = self.kwargs.copy()
+ kw.update(kwargs)
+ else:
+ kw = kwargs or self.kwargs
+
+ return self.clazz(*(self.pending + args), **kw)
+
+
+def Prepend(filepath, text):
+ """ Prepends text to the file.
+
+ Creates the file if it does not exist.
+ """
+ file_data = text
+ if os.path.exists(filepath):
+ file_data += open(filepath).read()
+ f = open(filepath, 'w')
+ f.write(file_data)
+ f.close()
+
+
+def MakeWorldReadable(path):
+ """Change the permissions of the given path to make it world-readable.
+ This is often needed for archived files, so they can be served by web servers
+ or accessed by unprivileged network users."""
+
+ # No need to do anything special on Windows.
+ if IsWindows():
+ return
+
+ perms = stat.S_IMODE(os.stat(path)[stat.ST_MODE])
+ if os.path.isdir(path):
+ # Directories need read and exec.
+ os.chmod(path, perms | 0555)
+ else:
+ os.chmod(path, perms | 0444)
+
+
+def MakeParentDirectoriesWorldReadable(path):
+ """Changes the permissions of the given path and its parent directories
+ to make them world-readable. Stops on first directory which is
+ world-readable. This is often needed for archive staging directories,
+ so that they can be served by web servers or accessed by unprivileged
+ network users."""
+
+ # No need to do anything special on Windows.
+ if IsWindows():
+ return
+
+ while path != os.path.dirname(path):
+ current_permissions = stat.S_IMODE(os.stat(path)[stat.ST_MODE])
+ if current_permissions & 0555 == 0555:
+ break
+ os.chmod(path, current_permissions | 0555)
+ path = os.path.dirname(path)
+
+
+def MaybeMakeDirectory(*path):
+ """Creates an entire path, if it doesn't already exist."""
+ file_path = os.path.join(*path)
+ try:
+ os.makedirs(file_path)
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def RemovePath(*path):
+ """Removes the file or directory at 'path', if it exists."""
+ file_path = os.path.join(*path)
+ if os.path.exists(file_path):
+ if os.path.isdir(file_path):
+ RemoveDirectory(file_path)
+ else:
+ RemoveFile(file_path)
+
+
+def RemoveFile(*path):
+ """Removes the file located at 'path', if it exists."""
+ file_path = os.path.join(*path)
+ try:
+ os.remove(file_path)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+
+
+def MoveFile(path, new_path):
+ """Moves the file located at 'path' to 'new_path', if it exists."""
+ try:
+ RemoveFile(new_path)
+ os.rename(path, new_path)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+
+
+def LocateFiles(pattern, root=os.curdir):
+ """Yeilds files matching pattern found in root and its subdirectories.
+
+ An exception is thrown if root doesn't exist."""
+ for path, _, files in os.walk(os.path.abspath(root)):
+ for filename in fnmatch.filter(files, pattern):
+ yield os.path.join(path, filename)
+
+
+def RemoveFilesWildcards(file_wildcard, root=os.curdir):
+ """Removes files matching 'file_wildcard' in root and its subdirectories, if
+ any exists.
+
+ An exception is thrown if root doesn't exist."""
+ for item in LocateFiles(file_wildcard, root):
+ try:
+ os.remove(item)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+
+
+def RemoveGlobbedPaths(path_wildcard, root=os.curdir):
+ """Removes all paths matching 'path_wildcard' beneath root.
+
+ Returns the list of paths removed.
+
+ An exception is thrown if root doesn't exist."""
+ if not os.path.exists(root):
+ raise OSError(2, 'No such file or directory', root)
+
+  full_path_wildcard = os.path.join(root, path_wildcard)
+ paths = glob.glob(full_path_wildcard)
+ for path in paths:
+ # When glob returns directories they end in "/."
+ if path.endswith(os.sep + '.'):
+ path = path[:-2]
+ RemovePath(path)
+ return paths
+
+
+def RemoveDirectory(*path):
+ """Recursively removes a directory, even if it's marked read-only.
+
+ Remove the directory located at *path, if it exists.
+
+ shutil.rmtree() doesn't work on Windows if any of the files or directories
+ are read-only, which svn repositories and some .svn files are. We need to
+ be able to force the files to be writable (i.e., deletable) as we traverse
+ the tree.
+
+ Even with all this, Windows still sometimes fails to delete a file, citing
+ a permission error (maybe something to do with antivirus scans or disk
+ indexing). The best suggestion any of the user forums had was to wait a
+ bit and try again, so we do that too. It's hand-waving, but sometimes it
+ works. :/
+ """
+ file_path = os.path.join(*path)
+ if not os.path.exists(file_path):
+ return
+
+ if sys.platform == 'win32':
+ # Give up and use cmd.exe's rd command.
+ file_path = os.path.normcase(file_path)
+ for _ in xrange(3):
+ print 'RemoveDirectory running %s' % (' '.join(
+ ['cmd.exe', '/c', 'rd', '/q', '/s', file_path]))
+ if not subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path]):
+ break
+ print ' Failed'
+ time.sleep(3)
+ return
+
+ def RemoveWithRetry_non_win(rmfunc, path):
+ if os.path.islink(path):
+ return os.remove(path)
+ else:
+ return rmfunc(path)
+
+ remove_with_retry = RemoveWithRetry_non_win
+
+ def RmTreeOnError(function, path, excinfo):
+ r"""This works around a problem whereby python 2.x on Windows has no ability
+ to check for symbolic links. os.path.islink always returns False. But
+ shutil.rmtree will fail if invoked on a symbolic link whose target was
+ deleted before the link. E.g., reproduce like this:
+ > mkdir test
+ > mkdir test\1
+ > mklink /D test\current test\1
+ > python -c "import chromium_utils; chromium_utils.RemoveDirectory('test')"
+ To avoid this issue, we pass this error-handling function to rmtree. If
+ we see the exact sort of failure, we ignore it. All other failures we re-
+ raise.
+ """
+
+ exception_type = excinfo[0]
+ exception_value = excinfo[1]
+ # If shutil.rmtree encounters a symbolic link on Windows, os.listdir will
+ # fail with a WindowsError exception with an ENOENT errno (i.e., file not
+ # found). We'll ignore that error. Note that WindowsError is not defined
+ # for non-Windows platforms, so we use OSError (of which it is a subclass)
+ # to avoid lint complaints about an undefined global on non-Windows
+ # platforms.
+ if (function is os.listdir) and issubclass(exception_type, OSError):
+ if exception_value.errno == errno.ENOENT:
+ # File does not exist, and we're trying to delete, so we can ignore the
+ # failure.
+ print 'WARNING: Failed to list %s during rmtree. Ignoring.\n' % path
+ else:
+ raise
+ else:
+ raise
+
+ for root, dirs, files in os.walk(file_path, topdown=False):
+ # For POSIX: making the directory writable guarantees removability.
+ # Windows will ignore the non-read-only bits in the chmod value.
+ os.chmod(root, 0770)
+ for name in files:
+ remove_with_retry(os.remove, os.path.join(root, name))
+ for name in dirs:
+ remove_with_retry(lambda p: shutil.rmtree(p, onerror=RmTreeOnError),
+ os.path.join(root, name))
+
+ remove_with_retry(os.rmdir, file_path)
+
+
+def CopyFileToDir(src_path, dest_dir, dest_fn=None, link_ok=False):
+ """Copies the file found at src_path to the dest_dir directory, with metadata.
+
+ If dest_fn is specified, the src_path is copied to that name in dest_dir,
+ otherwise it is copied to a file of the same name.
+
+ Raises PathNotFound if either the file or the directory is not found.
+ """
+ # Verify the file and directory separately so we can tell them apart and
+ # raise PathNotFound rather than shutil.copyfile's IOError.
+ if not os.path.isfile(src_path):
+ raise PathNotFound('Unable to find file %s' % src_path)
+ if not os.path.isdir(dest_dir):
+ raise PathNotFound('Unable to find dir %s' % dest_dir)
+ src_file = os.path.basename(src_path)
+ if dest_fn:
+ # If we have ctypes and the caller doesn't mind links, use that to
+ # try to make the copy faster on Windows. http://crbug.com/418702.
+ if link_ok and WIN_LINK_FUNC:
+ WIN_LINK_FUNC(src_path, os.path.join(dest_dir, dest_fn))
+ else:
+ shutil.copy2(src_path, os.path.join(dest_dir, dest_fn))
+ else:
+ shutil.copy2(src_path, os.path.join(dest_dir, src_file))
+
+
+def MakeZip(output_dir, archive_name, file_list, file_relative_dir,
+ raise_error=True, remove_archive_directory=True):
+ """Packs files into a new zip archive.
+
+ Files are first copied into a directory within the output_dir named for
+ the archive_name, which will be created if necessary and emptied if it
+  already exists. The files are then packed using archive names
+ relative to the output_dir. That is, if the zipfile is unpacked in place,
+ it will create a directory identical to the new archive_name directory, in
+ the output_dir. The zip file will be named as the archive_name, plus
+ '.zip'.
+
+ Args:
+ output_dir: Absolute path to the directory in which the archive is to
+ be created.
+    archive_name: Name of the subdirectory of output_dir holding files to be
+      added to the new zipfile.
+ file_list: List of paths to files or subdirectories, relative to the
+ file_relative_dir.
+ file_relative_dir: Absolute path to the directory containing the files
+ and subdirectories in the file_list.
+ raise_error: Whether to raise a PathNotFound error if one of the files in
+ the list is not found.
+ remove_archive_directory: Whether to remove the archive staging directory
+ before copying files over to it.
+
+ Returns:
+ A tuple consisting of (archive_dir, zip_file_path), where archive_dir
+ is the full path to the newly created archive_name subdirectory.
+
+ Raises:
+ PathNotFound if any of the files in the list is not found, unless
+ raise_error is False, in which case the error will be ignored.
+ """
+
+ start_time = time.clock()
+ # Collect files into the archive directory.
+ archive_dir = os.path.join(output_dir, archive_name)
+ print 'output_dir: %s, archive_name: %s' % (output_dir, archive_name)
+ print 'archive_dir: %s, remove_archive_directory: %s, exists: %s' % (
+ archive_dir, remove_archive_directory, os.path.exists(archive_dir))
+ if remove_archive_directory and os.path.exists(archive_dir):
+ # Move it even if it's not a directory as expected. This can happen with
+ # FILES.cfg archive creation where we create an archive staging directory
+ # that is the same name as the ultimate archive name.
+ if not os.path.isdir(archive_dir):
+ print 'Moving old "%s" file to create same name directory.' % archive_dir
+ previous_archive_file = '%s.old' % archive_dir
+ MoveFile(archive_dir, previous_archive_file)
+ else:
+ print 'Removing %s' % archive_dir
+ RemoveDirectory(archive_dir)
+ print 'Now, os.path.exists(%s): %s' % (
+ archive_dir, os.path.exists(archive_dir))
+ MaybeMakeDirectory(archive_dir)
+ for needed_file in file_list:
+ needed_file = needed_file.rstrip()
+ # These paths are relative to the file_relative_dir. We need to copy
+ # them over maintaining the relative directories, where applicable.
+ src_path = os.path.join(file_relative_dir, needed_file)
+ dirname, basename = os.path.split(needed_file)
+ try:
+ if os.path.isdir(src_path):
+ if WIN_LINK_FUNC:
+ WIN_LINK_FUNC(src_path, os.path.join(archive_dir, needed_file))
+ else:
+ shutil.copytree(src_path, os.path.join(archive_dir, needed_file),
+ symlinks=True)
+ elif dirname != '' and basename != '':
+ dest_dir = os.path.join(archive_dir, dirname)
+ MaybeMakeDirectory(dest_dir)
+ CopyFileToDir(src_path, dest_dir, basename, link_ok=True)
+ else:
+ CopyFileToDir(src_path, archive_dir, basename, link_ok=True)
+ except PathNotFound:
+ if raise_error:
+ raise
+ end_time = time.clock()
+ print 'Took %f seconds to create archive directory.' % (end_time - start_time)
+
+ # Pack the zip file.
+ output_file = '%s.zip' % archive_dir
+ previous_file = '%s_old.zip' % archive_dir
+ MoveFile(output_file, previous_file)
+
+ # If we have 7z, use that as it's much faster. See http://crbug.com/418702.
+ windows_zip_cmd = None
+ if os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
+ windows_zip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'a', '-y', '-mx1']
+
+  # On Windows we use the python zip module; on Linux and Mac, we use the zip
+  # command because it will handle links and file bits (executable), which is
+  # much easier than trying to do that with ZipInfo options.
+ start_time = time.clock()
+ if IsWindows() and not windows_zip_cmd:
+ print 'Creating %s' % output_file
+
+ def _Addfiles(to_zip_file, dirname, files_to_add):
+ for this_file in files_to_add:
+ archive_name = this_file
+ this_path = os.path.join(dirname, this_file)
+ if os.path.isfile(this_path):
+ # Store files named relative to the outer output_dir.
+ archive_name = this_path.replace(output_dir + os.sep, '')
+ if os.path.getsize(this_path) == 0:
+ compress_method = zipfile.ZIP_STORED
+ else:
+ compress_method = zipfile.ZIP_DEFLATED
+ to_zip_file.write(this_path, archive_name, compress_method)
+ print 'Adding %s' % archive_name
+ zip_file = zipfile.ZipFile(output_file, 'w', zipfile.ZIP_DEFLATED,
+ allowZip64=True)
+ try:
+ os.path.walk(archive_dir, _Addfiles, zip_file)
+ finally:
+ zip_file.close()
+ else:
+ if IsMac() or IsLinux():
+ zip_cmd = ['zip', '-yr1']
+ else:
+ zip_cmd = windows_zip_cmd
+ saved_dir = os.getcwd()
+ os.chdir(os.path.dirname(archive_dir))
+ command = zip_cmd + [output_file, os.path.basename(archive_dir)]
+ result = RunCommand(command)
+ os.chdir(saved_dir)
+ if result and raise_error:
+ raise ExternalError('zip failed: %s => %s' %
+ (str(command), result))
+ end_time = time.clock()
+ print 'Took %f seconds to create zip.' % (end_time - start_time)
+ return (archive_dir, output_file)
+
+
+def ExtractZip(filename, output_dir, verbose=True):
+ """ Extract the zip archive in the output directory.
+ """
+ MaybeMakeDirectory(output_dir)
+
+  # On Linux and Mac, we use the unzip command as it will
+  # handle links and file bits (executable), which is much
+  # easier than trying to do that with ZipInfo options.
+ #
+ # The Mac Version of unzip unfortunately does not support Zip64, whereas
+ # the python module does, so we have to fallback to the python zip module
+ # on Mac if the filesize is greater than 4GB.
+ #
+ # On Windows, try to use 7z if it is installed, otherwise fall back to python
+ # zip module and pray we don't have files larger than 512MB to unzip.
+ unzip_cmd = None
+ if ((IsMac() and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
+ or IsLinux()):
+ unzip_cmd = ['unzip', '-o']
+ elif IsWindows() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
+ unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
+
+ if unzip_cmd:
+ # Make sure path is absolute before changing directories.
+ filepath = os.path.abspath(filename)
+ saved_dir = os.getcwd()
+ os.chdir(output_dir)
+ command = unzip_cmd + [filepath]
+ result = RunCommand(command)
+ os.chdir(saved_dir)
+ if result:
+ raise ExternalError('unzip failed: %s => %s' % (str(command), result))
+ else:
+ assert IsWindows() or IsMac()
+ zf = zipfile.ZipFile(filename)
+ # TODO(hinoka): This can be multiprocessed.
+ for name in zf.namelist():
+ if verbose:
+ print 'Extracting %s' % name
+ zf.extract(name, output_dir)
+ if IsMac():
+ # Restore permission bits.
+ os.chmod(os.path.join(output_dir, name),
+ zf.getinfo(name).external_attr >> 16L)
+
+
+def WindowsPath(path):
+ """Returns a Windows mixed-style absolute path, given a Cygwin absolute path.
+
+ The version of Python in the Chromium tree uses posixpath for os.path even
+ on Windows, so we convert to a mixed Windows path (that is, a Windows path
+ that uses forward slashes instead of backslashes) manually.
+ """
+ # TODO(pamg): make this work for other drives too.
+ if path.startswith('/cygdrive/c/'):
+ return path.replace('/cygdrive/c/', 'C:/')
+ return path
+
+
+def FindUpwardParent(start_dir, *desired_list):
+ """Finds the desired object's parent, searching upward from the start_dir.
+
+ Searches within start_dir and within all its parents looking for the desired
+ directory or file, which may be given in one or more path components. Returns
+ the first directory in which the top desired path component was found, or
+ raises PathNotFound if it wasn't.
+ """
+ desired_path = os.path.join(*desired_list)
+ last_dir = ''
+ cur_dir = start_dir
+ found_path = os.path.join(cur_dir, desired_path)
+ while not os.path.exists(found_path):
+ last_dir = cur_dir
+ cur_dir = os.path.dirname(cur_dir)
+ if last_dir == cur_dir:
+ raise PathNotFound('Unable to find %s above %s' %
+ (desired_path, start_dir))
+ found_path = os.path.join(cur_dir, desired_path)
+ # Strip the entire original desired path from the end of the one found
+ # and remove a trailing path separator, if present.
+ found_path = found_path[:len(found_path) - len(desired_path)]
+ if found_path.endswith(os.sep):
+ found_path = found_path[:len(found_path) - 1]
+ return found_path
+
+
+def FindUpward(start_dir, *desired_list):
+ """Returns a path to the desired directory or file, searching upward.
+
+ Searches within start_dir and within all its parents looking for the desired
+ directory or file, which may be given in one or more path components. Returns
+ the full path to the desired object, or raises PathNotFound if it wasn't
+ found.
+ """
+ parent = FindUpwardParent(start_dir, *desired_list)
+ return os.path.join(parent, *desired_list)
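A minimal sketch of the upward search, assuming a hypothetical checkout where some ancestor directory contains src/tools:

  # e.g. with /work/build/src/tools present:
  #   FindUpwardParent('/work/build/src/chrome/browser', 'src', 'tools')
  #     returns '/work/build'
  #   FindUpward('/work/build/src/chrome/browser', 'src', 'tools')
  #     returns '/work/build/src/tools'
  tools_dir = FindUpward(os.getcwd(), 'src', 'tools')  # raises PathNotFound if absent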
+
+
+def RunAndPrintDots(function):
+ """Starts a background thread that prints dots while the function runs."""
+
+ def Hook(*args, **kwargs):
+ event = threading.Event()
+
+ def PrintDots():
+ counter = 0
+ while not event.isSet():
+ event.wait(5)
+ sys.stdout.write('.')
+ counter = (counter + 1) % 80
+ if not counter:
+ sys.stdout.write('\n')
+ sys.stdout.flush()
+ t = threading.Thread(target=PrintDots)
+ t.start()
+ try:
+ return function(*args, **kwargs)
+ finally:
+ event.set()
+ t.join()
+ return Hook
+
+
+class RunCommandFilter(object):
+ """Class that should be subclassed to provide a filter for RunCommand."""
+ # Method could be a function
+ # pylint: disable=R0201
+
+ def FilterLine(self, a_line):
+ """Called for each line of input. The \n is included on a_line. Should
+ return what is to be recorded as the output for this line. A result of
+ None suppresses the line."""
+ return a_line
+
+ def FilterDone(self, last_bits):
+ """Acts just like FilterLine, but is called with any data collected after
+ the last newline of the command."""
+ return last_bits
+
+
+class FilterCapture(RunCommandFilter):
+ """Captures the text and places it into an array."""
+ def __init__(self):
+ RunCommandFilter.__init__(self)
+ self.text = []
+
+ def FilterLine(self, line):
+ self.text.append(line.rstrip())
+
+ def FilterDone(self, text):
+ self.text.append(text)
+
+
+def RunCommand(command, parser_func=None, filter_obj=None, pipes=None,
+ print_cmd=True, timeout=None, max_time=None, **kwargs):
+ """Runs the command list, printing its output and returning its exit status.
+
+ Prints the given command (which should be a list of one or more strings),
+ then runs it and writes its stdout and stderr to the appropriate file handles.
+
+ If timeout is set, the process will be killed if output is stopped after
+ timeout seconds. If max_time is set, the process will be killed if it runs for
+ more than max_time.
+
+ If parser_func is not given, the subprocess's output is passed to stdout
+ and stderr directly. If the func is given, each line of the subprocess's
+ stdout/stderr is passed to the func and then written to stdout.
+
+ If filter_obj is given, all output is run through the filter a line
+ at a time before it is written to stdout.
+
+ We do not currently support parsing stdout and stderr independent of
+ each other. In previous attempts, this led to output ordering issues.
+ By merging them when either needs to be parsed, we avoid those ordering
+ issues completely.
+
+ pipes is a list of commands (also a list) that will receive the output of
+  the initial command. For example, if you want to run "python a | python b | c",
+ the "command" will be set to ['python', 'a'], while pipes will be set to
+ [['python', 'b'],['c']]
+ """
+
+ def TimedFlush(timeout, fh, kill_event):
+ """Flush fh every timeout seconds until kill_event is true."""
+ while True:
+ try:
+ fh.flush()
+ # File handle is closed, exit.
+ except ValueError:
+ break
+ # Wait for kill signal or timeout.
+ if kill_event.wait(timeout):
+ break
+
+ # TODO(all): nsylvain's CommandRunner in buildbot_slave is based on this
+ # method. Update it when changes are introduced here.
+ def ProcessRead(readfh, writefh, parser_func=None, filter_obj=None,
+ log_event=None):
+ writefh.flush()
+
+    # Python on Windows writes the buffer only when it reaches 4k. Ideally
+    # we would flush at least every 10 seconds. However, we write and flush
+    # no more often than every 20 seconds to avoid flooding the master with
+    # network traffic from unbuffered output.
+ kill_event = threading.Event()
+ flush_thread = threading.Thread(
+ target=TimedFlush, args=(20, writefh, kill_event))
+ flush_thread.daemon = True
+ flush_thread.start()
+
+ try:
+ in_byte = readfh.read(1)
+ in_line = cStringIO.StringIO()
+ while in_byte:
+ # Capture all characters except \r.
+ if in_byte != '\r':
+ in_line.write(in_byte)
+
+ # Write and flush on newline.
+ if in_byte == '\n':
+ if log_event:
+ log_event.set()
+ if parser_func:
+ parser_func(in_line.getvalue().strip())
+
+ if filter_obj:
+ filtered_line = filter_obj.FilterLine(in_line.getvalue())
+ if filtered_line is not None:
+ writefh.write(filtered_line)
+ else:
+ writefh.write(in_line.getvalue())
+ in_line = cStringIO.StringIO()
+ in_byte = readfh.read(1)
+
+ if log_event and in_line.getvalue():
+ log_event.set()
+
+ # Write remaining data and flush on EOF.
+ if parser_func:
+ parser_func(in_line.getvalue().strip())
+
+ if filter_obj:
+ if in_line.getvalue():
+ filtered_line = filter_obj.FilterDone(in_line.getvalue())
+ if filtered_line is not None:
+ writefh.write(filtered_line)
+ else:
+ if in_line.getvalue():
+ writefh.write(in_line.getvalue())
+ finally:
+ kill_event.set()
+ flush_thread.join()
+ writefh.flush()
+
+ pipes = pipes or []
+
+ # Print the given command (which should be a list of one or more strings).
+ if print_cmd:
+ print '\n' + subprocess.list2cmdline(command) + '\n',
+ for pipe in pipes:
+ print ' | ' + subprocess.list2cmdline(pipe) + '\n',
+
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ if not (parser_func or filter_obj or pipes or timeout or max_time):
+ # Run the command. The stdout and stderr file handles are passed to the
+ # subprocess directly for writing. No processing happens on the output of
+ # the subprocess.
+ proc = subprocess.Popen(command, stdout=sys.stdout, stderr=sys.stderr,
+ bufsize=0, **kwargs)
+
+ else:
+ if not (parser_func or filter_obj):
+ filter_obj = RunCommandFilter()
+
+ # Start the initial process.
+ proc = subprocess.Popen(command, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, bufsize=0, **kwargs)
+ proc_handles = [proc]
+
+ if pipes:
+ pipe_number = 0
+ for pipe in pipes:
+ pipe_number = pipe_number + 1
+ if pipe_number == len(pipes) and not (parser_func or filter_obj):
+ # The last pipe process needs to output to sys.stdout or filter
+ stdout = sys.stdout
+ else:
+ # Output to a pipe, since another pipe is on top of us.
+ stdout = subprocess.PIPE
+ pipe_proc = subprocess.Popen(pipe, stdin=proc_handles[0].stdout,
+ stdout=stdout, stderr=subprocess.STDOUT)
+ proc_handles.insert(0, pipe_proc)
+
+ # Allow proc to receive a SIGPIPE if the piped process exits.
+ for handle in proc_handles[1:]:
+ handle.stdout.close()
+
+ log_event = threading.Event()
+
+ # Launch and start the reader thread.
+ thread = threading.Thread(target=ProcessRead,
+ args=(proc_handles[0].stdout, sys.stdout),
+ kwargs={'parser_func': parser_func,
+ 'filter_obj': filter_obj,
+ 'log_event': log_event})
+
+ kill_lock = threading.Lock()
+
+
+ def term_then_kill(handle, initial_timeout, numtimeouts, interval):
+ def timed_check():
+ for _ in range(numtimeouts):
+ if handle.poll() is not None:
+ return True
+ time.sleep(interval)
+
+ handle.terminate()
+ time.sleep(initial_timeout)
+ timed_check()
+ if handle.poll() is None:
+ handle.kill()
+ timed_check()
+ return handle.poll() is not None
+
+
+ def kill_proc(proc_handles, message=None):
+ with kill_lock:
+ if proc_handles:
+ killed = term_then_kill(proc_handles[0], 0.1, 5, 1)
+
+ if message:
+ print >> sys.stderr, message
+
+ if not killed:
+ print >> sys.stderr, 'could not kill pid %d!' % proc_handles[0].pid
+ else:
+ print >> sys.stderr, 'program finished with exit code %d' % (
+ proc_handles[0].returncode)
+
+ # Prevent other timeouts from double-killing.
+ del proc_handles[:]
+
+ def timeout_func(timeout, proc_handles, log_event, finished_event):
+ while log_event.wait(timeout):
+ log_event.clear()
+ if finished_event.is_set():
+ return
+
+ message = ('command timed out: %d seconds without output, attempting to '
+ 'kill' % timeout)
+ kill_proc(proc_handles, message)
+
+ def maxtimeout_func(timeout, proc_handles, finished_event):
+ if not finished_event.wait(timeout):
+ message = ('command timed out: %d seconds elapsed' % timeout)
+ kill_proc(proc_handles, message)
+
+ timeout_thread = None
+ maxtimeout_thread = None
+ finished_event = threading.Event()
+
+ if timeout:
+ timeout_thread = threading.Thread(target=timeout_func,
+ args=(timeout, proc_handles, log_event,
+ finished_event))
+ timeout_thread.daemon = True
+ if max_time:
+ maxtimeout_thread = threading.Thread(target=maxtimeout_func,
+ args=(max_time, proc_handles,
+ finished_event))
+ maxtimeout_thread.daemon = True
+
+ thread.start()
+ if timeout_thread:
+ timeout_thread.start()
+ if maxtimeout_thread:
+ maxtimeout_thread.start()
+
+ # Wait for the commands to terminate.
+ for handle in proc_handles:
+ handle.wait()
+
+ # Wake up timeout threads.
+ finished_event.set()
+ log_event.set()
+
+ # Wait for the reader thread to complete (implies EOF reached on stdout/
+ # stderr pipes).
+ thread.join()
+
+ # Check whether any of the sub commands has failed.
+ for handle in proc_handles:
+ if handle.returncode:
+ return handle.returncode
+
+ # Wait for the command to terminate.
+ proc.wait()
+ return proc.returncode
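A minimal usage sketch (command and timeout values are illustrative): capture filtered output while enforcing both a no-output timeout and an overall deadline.

  capture = FilterCapture()
  status = RunCommand(['ls', '-l'], filter_obj=capture, timeout=60, max_time=600)
  print 'exit status:', status
  print '\n'.join(capture.text)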
+
+
+def GetStatusOutput(command, **kwargs):
+ """Runs the command list, returning its result and output."""
+ proc = subprocess.Popen(command, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, bufsize=1,
+ **kwargs)
+ output = proc.communicate()[0]
+ result = proc.returncode
+
+ return (result, output)
+
+
+def GetCommandOutput(command):
+ """Runs the command list, returning its output.
+
+ Run the command and returns its output (stdout and stderr) as a string.
+
+ If the command exits with an error, raises ExternalError.
+ """
+ (result, output) = GetStatusOutput(command)
+ if result:
+ raise ExternalError('%s: %s' % (subprocess.list2cmdline(command), output))
+ return output
+
+
+def GetGClientCommand(platform=None):
+ """Returns the executable command name, depending on the platform.
+ """
+ if not platform:
+ platform = sys.platform
+ if platform.startswith('win'):
+ # Windows doesn't want to depend on bash.
+ return 'gclient.bat'
+ else:
+ return 'gclient'
+
+
+# Linux scripts use ssh to move files to the archive host.
+def SshMakeDirectory(host, dest_path):
+ """Creates the entire dest_path on the remote ssh host.
+ """
+ command = ['ssh', host, 'mkdir', '-p', dest_path]
+ result = RunCommand(command)
+ if result:
+ raise ExternalError('Failed to ssh mkdir "%s" on "%s" (%s)' %
+ (dest_path, host, result))
+
+
+def SshMoveFile(host, src_path, dest_path):
+ """Moves src_path (if it exists) to dest_path on the remote host.
+ """
+ command = ['ssh', host, 'test', '-e', src_path]
+ result = RunCommand(command)
+ if result:
+ # Nothing to do if src_path doesn't exist.
+ return result
+
+ command = ['ssh', host, 'mv', src_path, dest_path]
+ result = RunCommand(command)
+ if result:
+ raise ExternalError('Failed to ssh mv "%s" -> "%s" on "%s" (%s)' %
+ (src_path, dest_path, host, result))
+
+
+def SshCopyFiles(srcs, host, dst):
+ """Copies the srcs file(s) to dst on the remote ssh host.
+ dst is expected to exist.
+ """
+ command = ['scp', srcs, host + ':' + dst]
+ result = RunCommand(command)
+ if result:
+ raise ExternalError('Failed to scp "%s" to "%s" (%s)' %
+ (srcs, host + ':' + dst, result))
+
+
+def SshExtractZip(host, zipname, dst):
+ """extract the remote zip file to dst on the remote ssh host.
+ """
+ command = ['ssh', host, 'unzip', '-o', '-d', dst, zipname]
+ result = RunCommand(command)
+ if result:
+ raise ExternalError('Failed to ssh unzip -o -d "%s" "%s" on "%s" (%s)' %
+ (dst, zipname, host, result))
+
+  # unzip will create directories with access 700, which is often not what we
+ # need. Fix the permissions for the whole archive.
+ command = ['ssh', host, 'chmod', '-R', '755', dst]
+ result = RunCommand(command)
+ if result:
+ raise ExternalError('Failed to ssh chmod -R 755 "%s" on "%s" (%s)' %
+ (dst, host, result))
+
+
+def SshCopyTree(srctree, host, dst):
+ """Recursively copies the srctree to dst on the remote ssh host.
+ For consistency with shutil, dst is expected to not exist.
+ """
+ command = ['ssh', host, '[ -d "%s" ]' % dst]
+ result = RunCommand(command)
+ if result:
+    raise ExternalError('SshCopyTree destination directory "%s" already exists.'
+                        % (host + ':' + dst))
+
+ SshMakeDirectory(host, os.path.dirname(dst))
+ command = ['scp', '-r', '-p', srctree, host + ':' + dst]
+ result = RunCommand(command)
+ if result:
+ raise ExternalError('Failed to scp "%s" to "%s" (%s)' %
+ (srctree, host + ':' + dst, result))
+
+
+def ListMasters(cue='master.cfg', include_public=True, include_internal=True):
+ """Returns all the masters found."""
+ # Look for "internal" masters first.
+ path_internal = os.path.join(
+ BUILD_DIR, os.pardir, 'build_internal', 'masters/*/' + cue)
+ path = os.path.join(BUILD_DIR, 'masters/*/' + cue)
+ filenames = []
+ if include_public:
+ filenames += glob.glob(path)
+ if include_internal:
+ filenames += glob.glob(path_internal)
+ return [os.path.abspath(os.path.dirname(f)) for f in filenames]
+
+
+def MasterPath(mastername, include_public=True, include_internal=True):
+ path = os.path.join(BUILD_DIR, 'masters', 'master.%s' % mastername)
+ path_internal = os.path.join(
+ BUILD_DIR, os.pardir, 'build_internal', 'masters',
+ 'master.%s' % mastername)
+ if include_public and os.path.isdir(path):
+ return path
+ if include_internal and os.path.isdir(path_internal):
+ return path_internal
+ raise LookupError('Path for master %s not found' % mastername)
+
+
+def ListMastersWithSlaves(include_public=True, include_internal=True):
+ masters_path = ListMasters('builders.pyl', include_public, include_internal)
+ masters_path.extend(ListMasters('slaves.cfg', include_public,
+ include_internal))
+ return masters_path
+
+
+def GetSlavesFromMasterPath(path, fail_hard=False):
+ builders_path = os.path.join(path, 'builders.pyl')
+ if os.path.exists(builders_path):
+ return GetSlavesFromBuildersFile(builders_path)
+ return RunSlavesCfg(os.path.join(path, 'slaves.cfg'), fail_hard=fail_hard)
+
+
+def GetAllSlaves(fail_hard=False, include_public=True, include_internal=True):
+ """Return all slave objects from masters."""
+ slaves = []
+ for master in ListMastersWithSlaves(include_public, include_internal):
+ cur_slaves = GetSlavesFromMasterPath(master, fail_hard)
+ for slave in cur_slaves:
+ slave['mastername'] = os.path.basename(master)
+ slaves.extend(cur_slaves)
+ return slaves
+
+
+def GetSlavesForHost():
+ """Get slaves for a host, defaulting to current host."""
+ hostname = os.getenv('TESTING_SLAVENAME')
+ if not hostname:
+ hostname = socket.getfqdn().split('.', 1)[0].lower()
+ return [s for s in GetAllSlaves() if s.get('hostname') == hostname]
+
+
+def GetActiveSubdir():
+ """Get current checkout's subdir, if checkout uses subdir layout."""
+ rootdir, subdir = os.path.split(os.path.dirname(BUILD_DIR))
+ if subdir != 'b' and os.path.basename(rootdir) == 'c':
+ return subdir
+
+
+def GetActiveSlavename():
+ slavename = os.getenv('TESTING_SLAVENAME')
+ if not slavename:
+ slavename = socket.getfqdn().split('.', 1)[0].lower()
+ subdir = GetActiveSubdir()
+ if subdir:
+ return '%s#%s' % (slavename, subdir)
+ return slavename
+
+
+def EntryToSlaveName(entry):
+ """Produces slave name from the slaves config dict."""
+ name = entry.get('slavename') or entry.get('hostname')
+ if 'subdir' in entry:
+ return '%s#%s' % (name, entry['subdir'])
+ return name
+
+
+def GetActiveMaster(slavename=None, default=None):
+ """Returns the name of the Active master serving the current host.
+
+ Parse all of the active masters with slaves matching the current hostname
+ and optional slavename. Returns |default| if no match found.
+ """
+ slavename = slavename or GetActiveSlavename()
+ for slave in GetAllSlaves():
+ if slavename == EntryToSlaveName(slave):
+ return slave['master']
+ return default
+
+
+@contextmanager
+def MasterEnvironment(master_dir):
+ """Context manager that enters an enviornment similar to a master's.
+
+ This involves:
+ - Modifying 'sys.path' to include paths available to the master.
+ - Changing directory (via os.chdir()) to the master's base directory.
+
+ These changes will be reverted after the context manager completes.
+
+ Args:
+ master_dir: (str) The master's base directory.
+ """
+ master_dir = os.path.abspath(master_dir)
+
+ # Setup a 'sys.path' that is adequate for loading 'slaves.cfg'.
+ old_cwd = os.getcwd()
+
+ with env.GetInfraPythonPath(master_dir=master_dir).Enter():
+ try:
+ os.chdir(master_dir)
+ yield
+ finally:
+ os.chdir(old_cwd)
+
+
+def ParsePythonCfg(cfg_filepath, fail_hard=False):
+ """Retrieves data from a python config file."""
+ if not os.path.exists(cfg_filepath):
+ return None
+
+  # Execute 'slaves.cfg' in the master path environment.
+ with MasterEnvironment(os.path.dirname(os.path.abspath(cfg_filepath))):
+ try:
+ local_vars = {}
+ execfile(os.path.join(cfg_filepath), local_vars)
+ del local_vars['__builtins__']
+ return local_vars
+ except Exception as e:
+ # pylint: disable=C0323
+ print >>sys.stderr, 'An error occurred while parsing %s: %s' % (
+ cfg_filepath, e)
+ print >>sys.stderr, traceback.format_exc() # pylint: disable=C0323
+ if fail_hard:
+ raise
+ return {}
+
+
+def RunSlavesCfg(slaves_cfg, fail_hard=False):
+ """Runs slaves.cfg in a consistent way."""
+ slave_config = ParsePythonCfg(slaves_cfg, fail_hard=fail_hard) or {}
+ return slave_config.get('slaves', [])
+
+
+def convert_json(option, _, value, parser):
+ """Provide an OptionParser callback to unmarshal a JSON string."""
+ setattr(parser.values, option.dest, json.loads(value))
+
+
+def b64_gz_json_encode(obj):
+ """Serialize a python object into base64."""
+ # The |separators| argument is to densify the command line.
+ return base64.b64encode(zlib.compress(
+ json.dumps(obj or {}, sort_keys=True, separators=(',', ':')), 9))
+
+
+def convert_gz_json(option, _, value, parser):
+ """Provide an OptionParser callback to unmarshal a b64 gz JSON string."""
+ setattr(
+ parser.values, option.dest,
+ json.loads(zlib.decompress(base64.b64decode(value))))
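An illustrative round trip through the two helpers above (the property values are made up):

  blob = b64_gz_json_encode({'mastername': 'chromium.linux', 'buildnumber': 17})
  print json.loads(zlib.decompress(base64.b64decode(blob)))
  # e.g. {u'buildnumber': 17, u'mastername': u'chromium.linux'}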
+
+
+def SafeTranslate(inputstr):
+ """Convert a free form string to one that can be used in a path.
+
+ This is similar to the safeTranslate function in buildbot.
+ """
+
+ badchars_map = string.maketrans('\t !#$%&\'()*+,./:;<=>?@[\\]^{|}~',
+ '______________________________')
+ if isinstance(inputstr, unicode):
+ inputstr = inputstr.encode('utf8')
+ return inputstr.translate(badchars_map)
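One illustrative translation (the builder name is invented):

  print SafeTranslate('Mac 10.9 Tests (dbg)')  # Mac_10_9_Tests__dbg_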
+
+
+def GetPrimaryProject(options):
+ """Returns: (str) the key of the primary project, or 'None' if none exists.
+ """
+ # The preferred way is to reference the 'primary_project' parameter.
+ result = options.build_properties.get('primary_project')
+ if result:
+ return result
+
+  # TODO(dnj): The 'primary_repo' parameter is used by some scripts to indicate
+ # the primary project name. This is not consistently used and will be
+ # deprecated in favor of 'primary_project' once that is rolled out.
+  result = options.build_properties.get('primary_repo')
+  if not result:
+    return None
+  # The 'primary_repo' property currently contains a trailing underscore.
+  # However, this isn't an obvious thing given its name, so we'll strip it
+  # here and remove that expectation.
+  return result.strip('_')
+
+
+def GetBuildSortKey(options, project=None):
+ """Reads a variety of sources to determine the current build revision.
+
+ NOTE: Currently, the return value does not qualify branch name. This can
+ present a problem with git numbering scheme, where numbers are only unique
+ in the context of their respective branches. When this happens, this
+ function will return a branch name as part of the sort key and its callers
+ will need to adapt their naming/querying schemes to accommodate this. Until
+ then, we will return 'None' as the branch name.
+  (e.g., refs/foo/bar@{#12345} => ("refs/foo/bar", 12345))
+
+ Args:
+ options: Command-line options structure
+ project: (str/None) If not None, the project to get the build sort key
+ for. Otherwise, the build-wide sort key will be used.
+ Returns: (branch, value) The qualified sortkey value
+ branch: (str/None) The name of the branch, or 'None' if there is no branch
+ context. Currently this always returns 'None'.
+ value: (int) The iteration value within the specified branch
+ Raises: (NoIdentifiedRevision) if no revision could be identified from the
+ supplied options.
+ """
+ # Is there a commit position for this build key?
+ try:
+ return GetCommitPosition(options, project=project)
+ except NoIdentifiedRevision:
+ pass
+
+ # Nope; derive the sort key from the 'got_[*_]revision' build properties. Note
+ # that this could be a Git commit (post flag day).
+ if project:
+ revision_key = 'got_%s_revision' % (project,)
+ else:
+ revision_key = 'got_revision'
+ revision = options.build_properties.get(revision_key)
+ if revision and not IsGitCommit(revision):
+ return None, int(revision)
+ raise NoIdentifiedRevision("Unable to identify revision for revision key "
+ "[%s]" % (revision_key,))
+
+
+def GetGitCommit(options, project=None):
+ """Returns the 'git' commit hash for the specified repository
+
+ This function uses environmental options to identify the 'git' commit hash
+ for the specified repository.
+
+ Args:
+ options: Command-line options structure
+ project: (str/None) The project key to use. If None, use the topmost
+ repository identification properties.
+ Raises: (NoIdentifiedRevision) if no git commit could be identified from the
+ supplied options.
+ """
+ if project:
+ git_commit_key = 'got_%s_revision_git' % (project,)
+ else:
+ git_commit_key = 'got_revision_git'
+ commit = options.build_properties.get(git_commit_key)
+ if commit:
+ return commit
+
+  # Is 'got_[*_]revision' itself the Git commit?
+ if project:
+ commit_key = 'got_%s_revision' % (project,)
+ else:
+ commit_key = 'got_revision'
+ commit = options.build_properties.get(commit_key)
+ if commit and IsGitCommit(commit):
+ return commit
+ raise NoIdentifiedRevision("Unable to identify commit for commit key: %s" % (
+ (git_commit_key, commit_key),))
+
+
+def GetSortableUploadPathForSortKey(branch, value, delimiter=None):
+ """Returns: (str) the canonical sort key path constructed from a sort key.
+
+ Returns a canonical sort key path for a sort key. The result will be one of
+ the following forms:
+ - (Without Branch or With Branch=='refs/heads/master'): <value> (e.g., 12345)
+ - (With non-Master Branch): <branch-path>-<value> (e.g.,
+ "refs_my-branch-12345")
+
+ When a 'branch' is supplied, it is converted to a path-suitable form. This
+ conversion replaces undesirable characters ('/') with underscores.
+
+ Note that when parsing the upload path, 'rsplit' should be used to isolate the
+ commit position value, as the branch path may have instances of the delimiter
+ in it.
+
+ See 'GetBuildSortKey' for more information about sort keys.
+
+ Args:
+ branch: (str/None) The sort key branch, or 'None' if there is no associated
+ branch.
+ value: (int) The sort key value.
+ delimiter: (str) The delimiter to insert in between <branch-path> and
+ <value> when constructing the branch-inclusive form. If omitted
+ (default), a hyphen ('-') will be used.
+ """
+ if branch and branch != 'refs/heads/master':
+ delimiter = delimiter or '-'
+ branch = branch.replace('/', '_')
+ return '%s%s%s' % (branch, delimiter, value)
+ return str(value)
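Two illustrative calls (branch and position values are made up):

  print GetSortableUploadPathForSortKey(None, 337295)
  # 337295
  print GetSortableUploadPathForSortKey('refs/branch-heads/2403', 337295)
  # refs_branch-heads_2403-337295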
+
+
+def ParseCommitPosition(value):
+ """Returns: The (branch, value) parsed from a commit position string.
+
+ Args:
+ value: (str) The value to parse.
+ Raises:
+ ValueError: If a commit position could not be parsed from 'value'.
+ """
+ match = COMMIT_POSITION_RE.match(value)
+ if not match:
+ raise ValueError("Failed to parse commit position from '%s'" % (value,))
+ return match.group(1), int(match.group(2))
+
+
+def BuildCommitPosition(branch, value):
+ """Returns: A constructed commit position.
+
+ An example commit position for branch 'refs/heads/master' value '12345' is:
+ refs/heads/master@{#12345}
+
+ This value can be parsed via 'ParseCommitPosition'.
+
+ Args:
+ branch: (str) The name of the commit position branch
+ value: (int): The commit position number.
+ """
+ return '%s@{#%s}' % (branch, value)
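A round-trip sketch using the two helpers above:

  cp = BuildCommitPosition('refs/heads/master', 337295)
  print cp                       # refs/heads/master@{#337295}
  print ParseCommitPosition(cp)  # ('refs/heads/master', 337295)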
+
+
+def GetCommitPosition(options, project=None):
+ """Returns: (branch, value) The parsed commit position from build options.
+
+ Returns the parsed commit position from the build options. This is identified
+ by examining the 'got_revision_cp' (or 'got_REPO_revision_cp', if 'project' is
+ specified) keys.
+
+ Args:
+ options: Command-line options structure
+ project: (str/None) If not None, the project to get the build sort key
+ for. Otherwise, the build-wide sort key will be used.
+ Returns: (branch, value) The qualified commit position value
+ Raises:
+ NoIdentifiedRevision: if no revision could be identified from the
+ supplied options.
+ ValueError: If the supplied commit position failed to parse successfully.
+ """
+ if project:
+ key = 'got_%s_revision_cp' % (project,)
+ else:
+ key = 'got_revision_cp'
+ cp = options.build_properties.get(key)
+ if not cp:
+ raise NoIdentifiedRevision("Unable to identify the commit position; the "
+ "build property is missing: %s" % (key,))
+ return ParseCommitPosition(cp)
+
+
+def AddPropertiesOptions(option_parser):
+ """Registers command line options for parsing build and factory properties.
+
+ After parsing, the options object will have the 'build_properties' and
+ 'factory_properties' attributes. The corresponding values will be python
+ dictionaries containing the properties. If the options are not given on
+ the command line, the dictionaries will be empty.
+
+ Args:
+ option_parser: An optparse.OptionParser to register command line options
+ for build and factory properties.
+ """
+ option_parser.add_option('--build-properties', action='callback',
+ callback=convert_json, type='string',
+ nargs=1, default={},
+ help='build properties in JSON format')
+ option_parser.add_option('--factory-properties', action='callback',
+ callback=convert_json, type='string',
+ nargs=1, default={},
+ help='factory properties in JSON format')
+
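# A minimal usage sketch tying the option registration above to
# GetCommitPosition; the JSON property value is made up for illustration.
def _example_build_properties_usage():
  import optparse
  parser = optparse.OptionParser()
  AddPropertiesOptions(parser)
  options, _ = parser.parse_args([
      '--build-properties',
      '{"got_revision_cp": "refs/heads/master@{#12345}"}'])
  # 'build_properties' is now a plain dict, so the commit position helpers
  # defined above can consume it directly.
  return GetCommitPosition(options)  # ('refs/heads/master', 12345)
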
+
+def AddThirdPartyLibToPath(lib, override=False):
+ """Adds the specified dir in build/third_party to sys.path.
+
+ Setting 'override' to true will place the directory in the beginning of
+ sys.path, useful for overriding previously set packages.
+
+ NOTE: We would like to deprecate this method, as it allows (encourages?)
+ scripts to define their own one-off Python path sequences, creating a
+ difficult-to-manage state where different scripts and libraries have
+ different path expectations. Please don't use this method if possible;
+  it is preferred to augment 'common.env' instead.
+ """
+ libpath = os.path.abspath(os.path.join(BUILD_DIR, 'third_party', lib))
+ if override:
+ sys.path.insert(0, libpath)
+ else:
+ sys.path.append(libpath)
+
+
+def GetLKGR():
+ """Connect to chromium LKGR server and get LKGR revision.
+
+ On success, returns the LKGR and 'ok'. On error, returns None and the text of
+ the error message.
+ """
+
+ try:
+ conn = urllib.urlopen('https://chromium-status.appspot.com/lkgr')
+ except IOError:
+ return (None, 'Error connecting to LKGR server! Is your internet '
+ 'connection working properly?')
+ try:
+ rev = int('\n'.join(conn.readlines()))
+ except IOError:
+ return (None, 'Error connecting to LKGR server! Is your internet '
+ 'connection working properly?')
+ except ValueError:
+ return None, 'LKGR server returned malformed data! Aborting...'
+ finally:
+ conn.close()
+
+ return rev, 'ok'
+
+
+def AbsoluteCanonicalPath(*path):
+ """Return the most canonical path Python can provide."""
+
+ file_path = os.path.join(*path)
+ return os.path.realpath(os.path.abspath(os.path.expanduser(file_path)))
+
+
+def IsolatedImportFromPath(path, extra_paths=None):
+ dir_path, module_file = os.path.split(path)
+ module_file = os.path.splitext(module_file)[0]
+
+ saved = sys.path
+ sys.path = [dir_path] + (extra_paths or [])
+ try:
+ return __import__(module_file)
+ except ImportError:
+ pass
+ finally:
+ sys.path = saved
+
+
+@contextmanager
+def MultiPool(processes):
+ """Manages a multiprocessing.Pool making sure to close the pool when done.
+
+ This will also call pool.terminate() when an exception is raised (and
+  re-raise the exception so the calling procedure can handle it).
+ """
+ try:
+ pool = multiprocessing.Pool(processes=processes)
+ yield pool
+ pool.close()
+ except:
+ pool.terminate()
+ raise
+ finally:
+ pool.join()
+
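# A minimal usage sketch of MultiPool; '_square' is a hypothetical worker and
# must live at module level so multiprocessing can pickle it.
def _square(x):
  return x * x

def _example_multipool_usage():
  with MultiPool(4) as pool:
    # The context manager closes (or terminates on error) and joins the pool.
    return pool.map(_square, range(10))
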
+
+def ReadJsonAsUtf8(filename=None, text=None):
+ """Read a json file or string and output a dict.
+
+ This function is different from json.load and json.loads in that it
+ returns utf8-encoded string for keys and values instead of unicode.
+
+ Args:
+ filename: path of a file to parse
+ text: json string to parse
+
+ If both 'filename' and 'text' are provided, 'filename' is used.
+ """
+ def _decode_list(data):
+ rv = []
+ for item in data:
+ if isinstance(item, unicode):
+ item = item.encode('utf-8')
+ elif isinstance(item, list):
+ item = _decode_list(item)
+ elif isinstance(item, dict):
+ item = _decode_dict(item)
+ rv.append(item)
+ return rv
+
+ def _decode_dict(data):
+ rv = {}
+ for key, value in data.iteritems():
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ elif isinstance(value, list):
+ value = _decode_list(value)
+ elif isinstance(value, dict):
+ value = _decode_dict(value)
+ rv[key] = value
+ return rv
+
+ if filename:
+ with open(filename, 'rb') as f:
+ return json.load(f, object_hook=_decode_dict)
+ if text:
+ return json.loads(text, object_hook=_decode_dict)
+
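# A short sketch of the difference from plain json.loads on Python 2: every
# string in the result is a utf8-encoded str, not a unicode object. The
# _example_* function is illustrative only.
def _example_read_json_as_utf8():
  data = ReadJsonAsUtf8(text='{"name": "value"}')
  assert data == {'name': 'value'}
  assert all(isinstance(s, str) for s in list(data.keys()) + list(data.values()))
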
+
+def GetMasterDevParameters(filename='master_cfg_params.json'):
+ """Look for master development parameter files in the master directory.
+
+ Return the parsed content if the file exists, as a dictionary.
+ Every string value in the dictionary is utf8-encoded str.
+
+ If the file is not found, returns an empty dict. This is on purpose, to
+ make the file optional.
+ """
+ if os.path.isfile(filename):
+ return ReadJsonAsUtf8(filename=filename)
+ return {}
+
+
+def FileExclusions():
+ all_platforms = ['.landmines', 'obj', 'gen', '.ninja_deps', '.ninja_log']
+ # Skip files that the testers don't care about. Mostly directories.
+ if IsWindows():
+ # Remove obj or lib dir entries
+ return all_platforms + ['cfinstaller_archive', 'lib', 'installer_archive']
+ if IsMac():
+ return all_platforms + [
+ # We don't need the arm bits v8 builds.
+ 'd8_arm', 'v8_shell_arm',
+ # pdfsqueeze is a build helper, no need to copy it to testers.
+ 'pdfsqueeze',
+ # We copy the framework into the app bundle, we don't need the second
+ # copy outside the app.
+ # TODO(mark): Since r28431, the copy in the build directory is actually
+ # used by tests. Putting two copies in the .zip isn't great, so maybe
+ # we can find another workaround.
+ # 'Chromium Framework.framework',
+ # 'Google Chrome Framework.framework',
+ # We copy the Helper into the app bundle, we don't need the second
+ # copy outside the app.
+ 'Chromium Helper.app',
+ 'Google Chrome Helper.app',
+ 'App Shim Socket',
+ '.deps', 'obj.host', 'obj.target', 'lib'
+ ]
+ if IsLinux():
+ return all_platforms + [
+ # intermediate build directories (full of .o, .d, etc.).
+ 'appcache', 'glue', 'lib.host', 'obj.host',
+ 'obj.target', 'src', '.deps',
+ # scons build cruft
+ '.sconsign.dblite',
+ # build helper, not needed on testers
+ 'mksnapshot',
+ ]
+
+ return all_platforms
+
+
+def DatabaseSetup(buildmaster_config, require_dbconfig=False):
+ """Read database credentials in the master directory."""
+ if os.path.isfile('.dbconfig'):
+ values = {}
+ execfile('.dbconfig', values)
+ if 'password' not in values:
+ raise Exception('could not get db password')
+
+ buildmaster_config['db_url'] = 'postgresql://%s:%s@%s/%s' % (
+ values['username'], values['password'],
+ values.get('hostname', 'localhost'), values['dbname'])
+ else:
+ assert not require_dbconfig
+
+
+def ReadBuildersFile(builders_path):
+ with open(builders_path) as fp:
+ contents = fp.read()
+ return ParseBuildersFileContents(builders_path, contents)
+
+
+def ParseBuildersFileContents(path, contents):
+ builders = ast.literal_eval(contents)
+
+ # Set some additional derived fields that are derived from the
+ # file's location in the filesystem.
+ basedir = os.path.dirname(os.path.abspath(path))
+ master_dirname = os.path.basename(basedir)
+ master_name_comps = master_dirname.split('.')[1:]
+ buildbot_path = '.'.join(master_name_comps)
+ master_classname = ''.join(c[0].upper() + c[1:] for c in master_name_comps)
+ builders['master_dirname'] = master_dirname
+ builders.setdefault('master_classname', master_classname)
+ builders.setdefault('buildbot_url',
+ 'https://build.chromium.org/p/%s/' % buildbot_path)
+
+ builders.setdefault('buildbucket_bucket', None)
+ builders.setdefault('service_account_file', None)
+
+ # The _str fields are printable representations of Python values:
+ # if builders['foo'] == "hello", then builders['foo_str'] == "'hello'".
+ # This allows them to be read back in by Python scripts properly.
+ builders['buildbucket_bucket_str'] = repr(builders['buildbucket_bucket'])
+ builders['service_account_file_str'] = repr(builders['service_account_file'])
+
+ return builders
+
+
+def GetSlavesFromBuildersFile(builders_path):
+ """Read builders_path and return a list of slave dicts."""
+ builders = ReadBuildersFile(builders_path)
+ return GetSlavesFromBuilders(builders)
+
+
+def GetSlavesFromBuilders(builders):
+ """Returns a list of slave dicts derived from the builders dict."""
+ builders_in_pool = {}
+
+ # builders.pyl contains a list of builders -> slave_pools
+ # and a list of slave_pools -> slaves.
+ # We require that each slave is in a single pool, but each slave
+ # may have multiple builders, so we need to build up the list of
+ # builders each slave pool supports.
+ for builder_name, builder_vals in builders['builders'].items():
+ pool_names = builder_vals['slave_pools']
+ for pool_name in pool_names:
+ if pool_name not in builders_in_pool:
+ builders_in_pool[pool_name] = set()
+ pool_data = builders['slave_pools'][pool_name]
+ for slave in pool_data['slaves']:
+ builders_in_pool[pool_name].add(builder_name)
+
+ # Now we can generate the list of slaves using the above lookup table.
+ slaves = []
+ for pool_name, pool_data in builders['slave_pools'].items():
+ slave_data = pool_data['slave_data']
+ builder_names = sorted(builders_in_pool[pool_name])
+ for slave in pool_data['slaves']:
+ slaves.append({
+ 'hostname': slave,
+ 'builder_name': builder_names,
+ 'master': builders['master_classname'],
+ 'os': slave_data['os'],
+ 'version': slave_data['version'],
+ 'bits': slave_data['bits'],
+ })
+
+ return slaves
+
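# An illustrative builders dict (all names are hypothetical) and the slave
# dicts the function above derives from it.
def _example_slaves_from_builders():
  builders = {
      'master_classname': 'MyMaster',
      'builders': {'Linux Tests': {'slave_pools': ['linux_pool']}},
      'slave_pools': {
          'linux_pool': {
              'slave_data': {'os': 'linux', 'version': 'trusty', 'bits': 64},
              'slaves': ['vm1', 'vm2'],
          },
      },
  }
  # Yields one dict per slave, e.g. for 'vm1':
  #   {'hostname': 'vm1', 'builder_name': ['Linux Tests'], 'master': 'MyMaster',
  #    'os': 'linux', 'version': 'trusty', 'bits': 64}
  return GetSlavesFromBuilders(builders)
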
+def GetSlaveNamesForBuilder(builders, builder_name):
+ """Returns a list of slave hostnames for the given builder name."""
+ slaves = []
+ pool_names = builders['builders'][builder_name]['slave_pools']
+ for pool_name in pool_names:
+ slaves.extend(builders['slave_pools'][pool_name]['slaves'])
+ return slaves
diff --git a/infra/scripts/legacy/scripts/common/env.py b/infra/scripts/legacy/scripts/common/env.py
new file mode 100755
index 0000000..3e497ff
--- /dev/null
+++ b/infra/scripts/legacy/scripts/common/env.py
@@ -0,0 +1,439 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Implements a standard mechanism for Chrome Infra Python environment setup.
+
+This library provides a central location to define Chrome Infra environment
+setup. It also provides several facilities to install this environment.
+
+Within a cooperating script, the environment can be set up by importing this
+module and running its 'Install' method:
+
+ # Install Chrome-Infra environment (replaces 'sys.path').
+ sys.path.insert(0,
+ os.path.join(os.path.dirname(__file__), os.pardir, ...))
+ # (/path/to/build/scripts)
+ import common.env
+ common.env.Install()
+
+When attempting to export the Chrome Infra path to external scripts, this
+script can be invoked as an executable with various subcommands to emit a valid
+PYTHONPATH clause.
+
+In addition, this module has several functions to construct the path.
+
+The goal is to deploy this module universally among Chrome-Infra scripts,
+BuildBot configurations, tool invocations, and tests to ensure that they all
+execute with the same centrally-defined environment.
+"""
+
+import argparse
+import collections
+import contextlib
+import imp
+import itertools
+import os
+import sys
+import traceback
+
+
+# Export for bootstrapping.
+__all__ = [
+ 'Install',
+ 'PythonPath',
+ ]
+
+
+# Name of environment extension file to seek.
+ENV_EXTENSION_NAME = 'environment.cfg.py'
+
+# Standard directories (based on this file's location in the <build> tree).
+def path_if(*args):
+ if not all(args):
+ return None
+ path = os.path.abspath(os.path.join(*args))
+ return (path) if os.path.exists(path) else (None)
+
+# The path to the <build> directory in which this script resides.
+Build = path_if(os.path.dirname(__file__), os.pardir, os.pardir)
+# The path to the <build_internal> directory.
+BuildInternal = path_if(Build, os.pardir, 'build_internal')
+
+
+def SetPythonPathEnv(value):
+  """Sets the system's PYTHONPATH environment variable.
+
+ Args:
+ value (str): The value to use. If this is empty/None, the system's
+ PYTHONPATH will be cleared.
+ """
+ # Since we can't assign None to the environment "dictionary", we have to
+ # either set or delete the key depending on the original value.
+ if value is not None:
+ os.environ['PYTHONPATH'] = str(value)
+ else:
+ os.environ.pop('PYTHONPATH', None)
+
+
+def Install(**kwargs):
+ """Replaces the current 'sys.path' with a hermetic Chrome-Infra path.
+
+ Args:
+ kwargs (dict): See GetInfraPythonPath arguments.
+
+ Returns (PythonPath): The PythonPath object that was installed.
+ """
+ infra_python_path = GetInfraPythonPath(**kwargs)
+ infra_python_path.Install()
+ return infra_python_path
+
+
+def SplitPath(path):
+ """Returns (list): A list of path elements.
+
+ Splits a path into path elements. For example (assuming '/' is the local
+ system path separator):
+ >>> print SplitPath('/a/b/c/d')
+ ['/', 'a', 'b', 'c', 'd']
+ >>> print SplitPath('a/b/c')
+    ['a', 'b', 'c']
+ """
+ parts = []
+ while True:
+ path, component = os.path.split(path)
+ if not component:
+ if path:
+ parts.append(path)
+ break
+ parts.append(component)
+ parts.reverse()
+ return parts
+
+
+def ExtendPath(base, root_dir):
+ """Returns (PythonPath): The extended python path.
+
+ This method looks for the ENV_EXTENSION_NAME file within "root_dir". If
+ present, it will be loaded as a Python module and have its "Extend" method
+ called.
+
+ If no extension is found, the base PythonPath will be returned.
+
+ Args:
+ base (PythonPath): The base python path.
+ root_dir (str): The path to check for an extension.
+ """
+ extension_path = os.path.join(root_dir, ENV_EXTENSION_NAME)
+ if not os.path.isfile(extension_path):
+ return base
+ with open(extension_path, 'r') as fd:
+ extension = fd.read()
+ extension_module = imp.new_module('env-extension')
+
+  # Execute the environment extension.
+ try:
+ exec extension in extension_module.__dict__
+
+ extend_func = getattr(extension_module, 'Extend', None)
+ assert extend_func, (
+ "The environment extension module is missing the 'Extend()' method.")
+ base = extend_func(base, root_dir)
+ if not isinstance(base, PythonPath):
+ raise TypeError("Extension module returned non-PythonPath object (%s)" % (
+ type(base).__name__,))
+ except Exception:
+ # Re-raise the exception, but include the configuration file name.
+ tb = traceback.format_exc()
+ raise RuntimeError("Environment extension [%s] raised exception: %s" % (
+ extension_path, tb))
+ return base
+
+
+def IsSystemPythonPath(path):
+  """Returns (bool): True if a Python path is a system (non user-installed) path.
+
+ Paths that are known to be user-installed paths can be ignored when setting
+ up a hermetic Python path environment to avoid user libraries that would not
+ be present in other environments falsely affecting code.
+
+ This function can be updated as-needed to exclude other non-system paths
+ encountered on bots and in the wild.
+ """
+ components = SplitPath(path)
+ for component in components:
+ if component in ('dist-packages', 'site-packages'):
+ return False
+ return True
+
+
+class PythonPath(collections.Sequence):
+ """An immutable set of Python path elements.
+
+ All paths represented in this structure are absolute. If a relative path
+ is passed into this structure, it will be converted to absolute based on
+ the current working directory (via os.path.abspath).
+ """
+
+ def __init__(self, components=None):
+ """Initializes a new PythonPath instance.
+
+ Args:
+ components (list): A list of path component strings.
+ """
+ seen = set()
+ self._components = []
+ for component in (components or ()):
+ component = os.path.abspath(component)
+ assert isinstance(component, basestring), (
+ "Path component '%s' is not a string (%s)" % (
+ component, type(component).__name__))
+ if component in seen:
+ continue
+ seen.add(component)
+ self._components.append(component)
+
+ def __getitem__(self, value):
+ return self._components[value]
+
+ def __len__(self):
+ return len(self._components)
+
+ def __iadd__(self, other):
+ return self.Append(other)
+
+ def __repr__(self):
+ return self.pathstr
+
+ def __eq__(self, other):
+ assert isinstance(other, type(self))
+ return self._components == other._components
+
+ @classmethod
+ def Flatten(cls, *paths):
+ """Returns (list): A single-level list containing flattened path elements.
+
+ >>> print PythonPath.Flatten('a', ['b', ['c', 'd']])
+ ['a', 'b', 'c', 'd']
+ """
+ result = []
+ for path in paths:
+ if not isinstance(path, basestring):
+ # Assume it's an iterable of paths.
+ result += cls.Flatten(*path)
+ else:
+ result.append(path)
+ return result
+
+ @classmethod
+ def FromPaths(cls, *paths):
+ """Returns (PythonPath): A PythonPath instantiated from path elements.
+
+ Args:
+ paths (tuple): A tuple of path elements or iterables containing path
+ elements (e.g., PythonPath instances).
+ """
+ return cls(cls.Flatten(*paths))
+
+ @classmethod
+ def FromPathStr(cls, pathstr):
+ """Returns (PythonPath): A PythonPath instantiated from the path string.
+
+ Args:
+ pathstr (str): An os.pathsep()-delimited path string.
+ """
+ return cls(pathstr.split(os.pathsep))
+
+ @property
+ def pathstr(self):
+ """Returns (str): A path string for the instance's path elements."""
+ return os.pathsep.join(self)
+
+ def IsHermetic(self):
+ """Returns (bool): True if this instance contains only system paths."""
+ return all(IsSystemPythonPath(p) for p in self)
+
+ def GetHermetic(self):
+ """Returns (PythonPath): derivative PythonPath containing only system paths.
+ """
+ return type(self).FromPaths(*(p for p in self if IsSystemPythonPath(p)))
+
+ def Append(self, *paths):
+ """Returns (PythonPath): derivative PythonPath with paths added to the end.
+
+ Args:
+ paths (tuple): A tuple of path elements to append to the current instance.
+ """
+ return type(self)(itertools.chain(self, self.FromPaths(*paths)))
+
+ def Override(self, *paths):
+ """Returns (PythonPath): derivative PythonPath with paths prepended.
+
+ Args:
+ paths (tuple): A tuple of path elements to prepend to the current
+ instance.
+ """
+ return self.FromPaths(*paths).Append(self)
+
+ def Install(self):
+ """Overwrites Python runtime variables based on the current instance.
+
+ Performs the following operations:
+ - Replaces sys.path with the current instance's path.
+ - Replaces os.environ['PYTHONPATH'] with the current instance's path
+ string.
+ """
+ sys.path = list(self)
+ SetPythonPathEnv(self.pathstr)
+
+ @contextlib.contextmanager
+ def Enter(self):
+ """Context manager wrapper for Install.
+
+ On exit, the context manager will restore the original environment.
+ """
+ orig_sys_path = sys.path[:]
+ orig_pythonpath = os.environ.get('PYTHONPATH')
+
+ try:
+ self.Install()
+ yield
+ finally:
+ sys.path = orig_sys_path
+ SetPythonPathEnv(orig_pythonpath)
+
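# A minimal usage sketch of PythonPath (the directories are hypothetical):
# build a path, inspect it, and temporarily install it via Enter().
def _example_python_path_usage():
  path = PythonPath.FromPaths('/opt/infra/lib', '/opt/infra/tools')
  print path.pathstr  # '/opt/infra/lib:/opt/infra/tools' on POSIX systems
  with path.Enter():
    # Inside this block sys.path and PYTHONPATH are replaced by 'path';
    # both are restored when the block exits.
    pass
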
+
+def GetSysPythonPath(hermetic=True):
+ """Returns (PythonPath): A path based on 'sys.path'.
+
+ Args:
+ hermetic (bool): If True, prune any non-system path.
+ """
+ path = PythonPath.FromPaths(*sys.path)
+ if hermetic:
+ path = path.GetHermetic()
+ return path
+
+
+def GetEnvPythonPath():
+ """Returns (PythonPath): A path based on the PYTHONPATH environment variable.
+ """
+ pythonpath = os.environ.get('PYTHONPATH')
+ if not pythonpath:
+ return PythonPath.FromPaths()
+ return PythonPath.FromPathStr(pythonpath)
+
+
+def GetMasterPythonPath(master_dir):
+ """Returns (PythonPath): A path including a BuildBot master's directory.
+
+ Args:
+ master_dir (str): The BuildBot master root directory.
+ """
+ return PythonPath.FromPaths(master_dir)
+
+
+def GetBuildPythonPath():
+ """Returns (PythonPath): The Chrome Infra build path."""
+ build_path = PythonPath.FromPaths()
+ for extension_dir in (
+ Build,
+ BuildInternal,
+ ):
+ if extension_dir:
+ build_path = ExtendPath(build_path, extension_dir)
+ return build_path
+
+
+def GetInfraPythonPath(hermetic=True, master_dir=None):
+ """Returns (PythonPath): The full working Chrome Infra utility path.
+
+ This path is consistent for master, slave, and tool usage. It includes (in
+ this order):
+ - Any environment PYTHONPATH overrides.
+ - If 'master_dir' is supplied, the master's python path component.
+ - The Chrome Infra build path.
+ - The system python path.
+
+ Args:
+    hermetic (bool): If True, prune any non-system path from the system path.
+ master_dir (str): If not None, include a master path component.
+ """
+ path = GetEnvPythonPath()
+ if master_dir:
+ path += GetMasterPythonPath(master_dir)
+ path += GetBuildPythonPath()
+ path += GetSysPythonPath(hermetic=hermetic)
+ return path
+
+
+def _InfraPathFromArgs(args):
+ """Returns (PythonPath): A PythonPath populated from command-line arguments.
+
+ Args:
+ args (argparse.Namespace): The command-line arguments constructed by 'main'.
+ """
+ return GetInfraPythonPath(
+ master_dir=args.master_dir,
+ )
+
+
+def _Command_Echo(args, path):
+ """Returns (int): Return code.
+
+ Command function for the 'echo' subcommand. Outputs the path string for
+ 'path'.
+
+ Args:
+ args (argparse.Namespace): The command-line arguments constructed by 'main'.
+ path (PythonPath): The python path to use.
+ """
+ args.output.write(path.pathstr)
+ return 0
+
+
+def _Command_Print(args, path):
+ """Returns (int): Return code.
+
+ Command function for the 'print' subcommand. Outputs each path component in
+ path on a separate line.
+
+ Args:
+ args (argparse.Namespace): The command-line arguments constructed by 'main'.
+ path (PythonPath): The python path to use.
+ """
+ for component in path:
+ print >>args.output, component
+ return 0
+
+
+def main():
+ """Main execution function."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-M', '--master_dir',
+ help="Augment the path with the master's directory.")
+ parser.add_argument('-o', '--output', metavar='PATH',
+ type=argparse.FileType('w'), default='-',
+ help="File to output to (use '-' for STDOUT).")
+
+ subparsers = parser.add_subparsers()
+
+ # 'echo'
+ subparser = subparsers.add_parser('echo')
+ subparser.set_defaults(func=_Command_Echo)
+
+ # 'print'
+ subparser = subparsers.add_parser('print')
+ subparser.set_defaults(func=_Command_Print)
+
+ # Parse
+ args = parser.parse_args()
+
+ # Execute our subcommand function, which will return the exit code.
+ path = _InfraPathFromArgs(args)
+ return args.func(args, path)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/infra/scripts/legacy/scripts/common/gtest_utils.py b/infra/scripts/legacy/scripts/common/gtest_utils.py
new file mode 100755
index 0000000..7c50ef6
--- /dev/null
+++ b/infra/scripts/legacy/scripts/common/gtest_utils.py
@@ -0,0 +1,659 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from common import chromium_utils
+import json
+import os
+import re
+import tempfile
+
+
+# These labels should match the ones output by gtest's JSON.
+TEST_UNKNOWN_LABEL = 'UNKNOWN'
+TEST_SUCCESS_LABEL = 'SUCCESS'
+TEST_FAILURE_LABEL = 'FAILURE'
+TEST_FAILURE_ON_EXIT_LABEL = 'FAILURE_ON_EXIT'
+TEST_CRASH_LABEL = 'CRASH'
+TEST_TIMEOUT_LABEL = 'TIMEOUT'
+TEST_SKIPPED_LABEL = 'SKIPPED'
+TEST_WARNING_LABEL = 'WARNING'
+
+FULL_RESULTS_FILENAME = 'full_results.json'
+TIMES_MS_FILENAME = 'times_ms.json'
+
+def CompressList(lines, max_length, middle_replacement):
+  """Ensures that |lines| is no longer than |max_length|. If |lines| needs to
+ be compressed then the middle items are replaced by |middle_replacement|.
+ """
+ if len(lines) <= max_length:
+ return lines
+ remove_from_start = max_length / 2
+ return (lines[:remove_from_start] +
+ [middle_replacement] +
+ lines[len(lines) - (max_length - remove_from_start):])
+
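# A short sketch of the compression behaviour (hypothetical lines): the middle
# of the list is collapsed into the replacement marker.
def _example_compress_list():
  compressed = CompressList(['a', 'b', 'c', 'd', 'e'], 3, '<snip>')
  assert compressed == ['a', '<snip>', 'd', 'e']
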
+
+class GTestLogParser(object):
+  """This helper class processes GTest test output."""
+
+ def __init__(self):
+ # State tracking for log parsing
+ self.completed = False
+ self._current_test = ''
+ self._failure_description = []
+ self._current_report_hash = ''
+ self._current_report = []
+ self._parsing_failures = False
+
+ # Line number currently being processed.
+ self._line_number = 0
+
+ # List of parsing errors, as human-readable strings.
+ self._internal_error_lines = []
+
+ # Tests are stored here as 'test.name': (status, [description]).
+ # The status should be one of ('started', 'OK', 'failed', 'timeout',
+ # 'warning'). Warning indicates that a test did not pass when run in
+ # parallel with other tests but passed when run alone. The description is
+ # a list of lines detailing the test's error, as reported in the log.
+ self._test_status = {}
+
+ # Reports are stored here as 'hash': [report].
+ self._memory_tool_reports = {}
+
+ # This may be either text or a number. It will be used in the phrase
+ # '%s disabled' or '%s flaky' on the waterfall display.
+ self._disabled_tests = 0
+ self._flaky_tests = 0
+
+ # Regular expressions for parsing GTest logs. Test names look like
+ # SomeTestCase.SomeTest
+ # SomeName/SomeTestCase.SomeTest/1
+ # This regexp also matches SomeName.SomeTest/1, which should be harmless.
+ test_name_regexp = r'((\w+/)?\w+\.\w+(/\d+)?)'
+
+ self._master_name_re = re.compile(r'\[Running for master: "([^"]*)"')
+ self.master_name = ''
+
+ self._test_name = re.compile(test_name_regexp)
+ self._test_start = re.compile(r'\[\s+RUN\s+\] ' + test_name_regexp)
+ self._test_ok = re.compile(r'\[\s+OK\s+\] ' + test_name_regexp)
+ self._test_fail = re.compile(r'\[\s+FAILED\s+\] ' + test_name_regexp)
+ self._test_passed = re.compile(r'\[\s+PASSED\s+\] \d+ tests?.')
+ self._run_test_cases_line = re.compile(
+ r'\[\s*\d+\/\d+\]\s+[0-9\.]+s ' + test_name_regexp + ' .+')
+ self._test_timeout = re.compile(
+ r'Test timeout \([0-9]+ ms\) exceeded for ' + test_name_regexp)
+ self._disabled = re.compile(r'\s*YOU HAVE (\d+) DISABLED TEST')
+ self._flaky = re.compile(r'\s*YOU HAVE (\d+) FLAKY TEST')
+
+ self._report_start = re.compile(
+ r'### BEGIN MEMORY TOOL REPORT \(error hash=#([0-9A-F]+)#\)')
+ self._report_end = re.compile(
+ r'### END MEMORY TOOL REPORT \(error hash=#([0-9A-F]+)#\)')
+
+ self._retry_message = re.compile('RETRYING FAILED TESTS:')
+ self.retrying_failed = False
+
+ self.TEST_STATUS_MAP = {
+ 'OK': TEST_SUCCESS_LABEL,
+ 'failed': TEST_FAILURE_LABEL,
+ 'timeout': TEST_TIMEOUT_LABEL,
+ 'warning': TEST_WARNING_LABEL
+ }
+
+ def GetCurrentTest(self):
+ return self._current_test
+
+ def _StatusOfTest(self, test):
+ """Returns the status code for the given test, or 'not known'."""
+ test_status = self._test_status.get(test, ('not known', []))
+ return test_status[0]
+
+ def _TestsByStatus(self, status, include_fails, include_flaky):
+ """Returns list of tests with the given status.
+
+ Args:
+ include_fails: If False, tests containing 'FAILS_' anywhere in their
+ names will be excluded from the list.
+ include_flaky: If False, tests containing 'FLAKY_' anywhere in their
+ names will be excluded from the list.
+ """
+ test_list = [x[0] for x in self._test_status.items()
+ if self._StatusOfTest(x[0]) == status]
+
+ if not include_fails:
+ test_list = [x for x in test_list if x.find('FAILS_') == -1]
+ if not include_flaky:
+ test_list = [x for x in test_list if x.find('FLAKY_') == -1]
+
+ return test_list
+
+ def _RecordError(self, line, reason):
+ """Record a log line that produced a parsing error.
+
+ Args:
+ line: text of the line at which the error occurred
+ reason: a string describing the error
+ """
+ self._internal_error_lines.append('%s: %s [%s]' %
+ (self._line_number, line.strip(), reason))
+
+ def RunningTests(self):
+ """Returns list of tests that appear to be currently running."""
+ return self._TestsByStatus('started', True, True)
+
+ def ParsingErrors(self):
+ """Returns a list of lines that have caused parsing errors."""
+ return self._internal_error_lines
+
+ def ClearParsingErrors(self):
+ """Clears the currently stored parsing errors."""
+ self._internal_error_lines = ['Cleared.']
+
+ def PassedTests(self, include_fails=False, include_flaky=False):
+ """Returns list of tests that passed."""
+ return self._TestsByStatus('OK', include_fails, include_flaky)
+
+ def FailedTests(self, include_fails=False, include_flaky=False):
+ """Returns list of tests that failed, timed out, or didn't finish
+ (crashed).
+
+ This list will be incorrect until the complete log has been processed,
+ because it will show currently running tests as having failed.
+
+ Args:
+ include_fails: If true, all failing tests with FAILS_ in their names will
+ be included. Otherwise, they will only be included if they crashed or
+ timed out.
+ include_flaky: If true, all failing tests with FLAKY_ in their names will
+ be included. Otherwise, they will only be included if they crashed or
+ timed out.
+
+ """
+ return (self._TestsByStatus('failed', include_fails, include_flaky) +
+ self._TestsByStatus('timeout', True, True) +
+ self._TestsByStatus('warning', include_fails, include_flaky) +
+ self.RunningTests())
+
+ def TriesForTest(self, test):
+ """Returns a list containing the state for all tries of the given test.
+ This parser doesn't support retries so a single result is returned."""
+ return [self.TEST_STATUS_MAP.get(self._StatusOfTest(test),
+ TEST_UNKNOWN_LABEL)]
+
+ def DisabledTests(self):
+ """Returns the name of the disabled test (if there is only 1) or the number
+ of disabled tests.
+ """
+ return self._disabled_tests
+
+ def FlakyTests(self):
+ """Returns the name of the flaky test (if there is only 1) or the number
+ of flaky tests.
+ """
+ return self._flaky_tests
+
+ def FailureDescription(self, test):
+ """Returns a list containing the failure description for the given test.
+
+ If the test didn't fail or timeout, returns [].
+ """
+ test_status = self._test_status.get(test, ('', []))
+ return ['%s: ' % test] + test_status[1]
+
+ def MemoryToolReportHashes(self):
+ """Returns list of report hashes found in the log."""
+ return self._memory_tool_reports.keys()
+
+ def MemoryToolReport(self, report_hash):
+ """Returns a list containing the report for a given hash.
+
+ If the report hash doesn't exist, returns [].
+ """
+ return self._memory_tool_reports.get(report_hash, [])
+
+ def CompletedWithoutFailure(self):
+ """Returns True if all tests completed and no tests failed unexpectedly."""
+ return self.completed and not self.FailedTests()
+
+ def ProcessLine(self, line):
+ """This is called once with each line of the test log."""
+
+ # Track line number for error messages.
+ self._line_number += 1
+
+    # Some tests (net_unittests in particular) run subprocesses which can write
+    # to the shared stdout buffer. Sometimes such output appears between a
+    # newline and gtest directives ('[ RUN ]', etc.), which breaks the parser.
+    # The code below tries to detect such cases and treat a mixed line as two
+    # separate lines.
+
+    # List of regexps that the parser expects to find at the start of a line
+    # but which can appear somewhere in the middle.
+ gtest_regexps = [
+ self._test_start,
+ self._test_ok,
+ self._test_fail,
+ self._test_passed,
+ ]
+
+ for regexp in gtest_regexps:
+ match = regexp.search(line)
+ if match:
+ break
+
+ if not match or match.start() == 0:
+ self._ProcessLine(line)
+ else:
+ self._ProcessLine(line[:match.start()])
+ self._ProcessLine(line[match.start():])
+
+ def _ProcessLine(self, line):
+ """Parses the line and changes the state of parsed tests accordingly.
+
+ Will recognize newly started tests, OK or FAILED statuses, timeouts, etc.
+ """
+
+ # Note: When sharding, the number of disabled and flaky tests will be read
+ # multiple times, so this will only show the most recent values (but they
+ # should all be the same anyway).
+
+ # Is it a line listing the master name?
+ if not self.master_name:
+ results = self._master_name_re.match(line)
+ if results:
+ self.master_name = results.group(1)
+
+ results = self._run_test_cases_line.match(line)
+ if results:
+ # A run_test_cases.py output.
+ if self._current_test:
+ if self._test_status[self._current_test][0] == 'started':
+ self._test_status[self._current_test] = (
+ 'timeout', self._failure_description)
+ self._current_test = ''
+ self._failure_description = []
+ return
+
+ # Is it a line declaring all tests passed?
+ results = self._test_passed.match(line)
+ if results:
+ self.completed = True
+ self._current_test = ''
+ return
+
+ # Is it a line reporting disabled tests?
+ results = self._disabled.match(line)
+ if results:
+ try:
+ disabled = int(results.group(1))
+ except ValueError:
+ disabled = 0
+ if disabled > 0 and isinstance(self._disabled_tests, int):
+ self._disabled_tests = disabled
+ else:
+ # If we can't parse the line, at least give a heads-up. This is a
+ # safety net for a case that shouldn't happen but isn't a fatal error.
+ self._disabled_tests = 'some'
+ return
+
+ # Is it a line reporting flaky tests?
+ results = self._flaky.match(line)
+ if results:
+ try:
+ flaky = int(results.group(1))
+ except ValueError:
+ flaky = 0
+ if flaky > 0 and isinstance(self._flaky_tests, int):
+ self._flaky_tests = flaky
+ else:
+ # If we can't parse the line, at least give a heads-up. This is a
+ # safety net for a case that shouldn't happen but isn't a fatal error.
+ self._flaky_tests = 'some'
+ return
+
+ # Is it the start of a test?
+ results = self._test_start.match(line)
+ if results:
+ if self._current_test:
+ if self._test_status[self._current_test][0] == 'started':
+ self._test_status[self._current_test] = (
+ 'timeout', self._failure_description)
+ test_name = results.group(1)
+ self._test_status[test_name] = ('started', ['Did not complete.'])
+ self._current_test = test_name
+ if self.retrying_failed:
+ self._failure_description = self._test_status[test_name][1]
+ self._failure_description.extend(['', 'RETRY OUTPUT:', ''])
+ else:
+ self._failure_description = []
+ return
+
+ # Is it a test success line?
+ results = self._test_ok.match(line)
+ if results:
+ test_name = results.group(1)
+ status = self._StatusOfTest(test_name)
+ if status != 'started':
+ self._RecordError(line, 'success while in status %s' % status)
+ if self.retrying_failed:
+ self._test_status[test_name] = ('warning', self._failure_description)
+ else:
+ self._test_status[test_name] = ('OK', [])
+ self._failure_description = []
+ self._current_test = ''
+ return
+
+ # Is it a test failure line?
+ results = self._test_fail.match(line)
+ if results:
+ test_name = results.group(1)
+ status = self._StatusOfTest(test_name)
+ if status not in ('started', 'failed', 'timeout'):
+ self._RecordError(line, 'failure while in status %s' % status)
+ # Don't overwrite the failure description when a failing test is listed a
+ # second time in the summary, or if it was already recorded as timing
+ # out.
+ if status not in ('failed', 'timeout'):
+ self._test_status[test_name] = ('failed', self._failure_description)
+ self._failure_description = []
+ self._current_test = ''
+ return
+
+ # Is it a test timeout line?
+ results = self._test_timeout.search(line)
+ if results:
+ test_name = results.group(1)
+ status = self._StatusOfTest(test_name)
+ if status not in ('started', 'failed'):
+ self._RecordError(line, 'timeout while in status %s' % status)
+ self._test_status[test_name] = (
+ 'timeout', self._failure_description + ['Killed (timed out).'])
+ self._failure_description = []
+ self._current_test = ''
+ return
+
+ # Is it the start of a new memory tool report?
+ results = self._report_start.match(line)
+ if results:
+ report_hash = results.group(1)
+ if report_hash in self._memory_tool_reports:
+ self._RecordError(line, 'multiple reports for this hash')
+ self._memory_tool_reports[report_hash] = []
+ self._current_report_hash = report_hash
+ self._current_report = []
+ return
+
+ # Is it the end of a memory tool report?
+ results = self._report_end.match(line)
+ if results:
+ report_hash = results.group(1)
+ if not self._current_report_hash:
+ self._RecordError(line, 'no BEGIN matches this END')
+ elif report_hash != self._current_report_hash:
+ self._RecordError(line, 'expected (error hash=#%s#)' %
+ self._current_report_hash)
+ else:
+ self._memory_tool_reports[self._current_report_hash] = (
+ self._current_report)
+ self._current_report_hash = ''
+ self._current_report = []
+ return
+
+ # Is it the start of the retry tests?
+ results = self._retry_message.match(line)
+ if results:
+ self.retrying_failed = True
+ return
+
+ # Random line: if we're in a report, collect it. Reports are
+ # generated after all tests are finished, so this should always belong to
+ # the current report hash.
+ if self._current_report_hash:
+ self._current_report.append(line)
+ return
+
+ # Random line: if we're in a test, collect it for the failure description.
+ # Tests may run simultaneously, so this might be off, but it's worth a try.
+ # This also won't work if a test times out before it begins running.
+ if self._current_test:
+ self._failure_description.append(line)
+
+ # Parse the "Failing tests:" list at the end of the output, and add any
+ # additional failed tests to the list. For example, this includes tests
+ # that crash after the OK line.
+ if self._parsing_failures:
+ results = self._test_name.match(line)
+ if results:
+ test_name = results.group(1)
+ status = self._StatusOfTest(test_name)
+ if status in ('not known', 'OK'):
+ self._test_status[test_name] = (
+ 'failed', ['Unknown error, see stdio log.'])
+ else:
+ self._parsing_failures = False
+ elif line.startswith('Failing tests:'):
+ self._parsing_failures = True
+
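# A minimal usage sketch of GTestLogParser; 'gtest_output.log' is a
# hypothetical log file captured from a gtest binary.
def _example_gtest_log_parser():
  parser = GTestLogParser()
  with open('gtest_output.log') as log_file:
    for line in log_file:
      parser.ProcessLine(line.rstrip('\n'))
  print parser.FailedTests()    # e.g. ['SomeSuite.SomeTest']
  print parser.DisabledTests()  # a count, or 'some' if the line was unparsable
  print parser.CompletedWithoutFailure()
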
+
+class GTestJSONParser(object):
+  # Limit of output snippet lines. Avoids flooding the logs with an amount
+  # of output that gums up the infrastructure.
+ OUTPUT_SNIPPET_LINES_LIMIT = 5000
+
+ def __init__(self, mastername=None):
+ self.json_file_path = None
+ self.delete_json_file = False
+
+ self.disabled_tests = set()
+ self.passed_tests = set()
+ self.failed_tests = set()
+ self.flaky_tests = set()
+ self.test_logs = {}
+ self.run_results = {}
+ self.ignored_failed_tests = set()
+
+ self.parsing_errors = []
+
+ self.master_name = mastername
+
+ # List our labels that match the ones output by gtest JSON.
+ self.SUPPORTED_LABELS = (TEST_UNKNOWN_LABEL,
+ TEST_SUCCESS_LABEL,
+ TEST_FAILURE_LABEL,
+ TEST_FAILURE_ON_EXIT_LABEL,
+ TEST_CRASH_LABEL,
+ TEST_TIMEOUT_LABEL,
+ TEST_SKIPPED_LABEL)
+
+ def ProcessLine(self, line):
+ # Deliberately do nothing - we parse out-of-band JSON summary
+ # instead of in-band stdout.
+ pass
+
+ def PassedTests(self):
+ return sorted(self.passed_tests)
+
+ def FailedTests(self, include_fails=False, include_flaky=False):
+ return sorted(self.failed_tests - self.ignored_failed_tests)
+
+ def TriesForTest(self, test):
+ """Returns a list containing the state for all tries of the given test."""
+ return self.run_results.get(test, [TEST_UNKNOWN_LABEL])
+
+ def FailureDescription(self, test):
+ return self.test_logs.get(test, [])
+
+ def IgnoredFailedTests(self):
+ return sorted(self.ignored_failed_tests)
+
+ @staticmethod
+ def MemoryToolReportHashes():
+ return []
+
+ def ParsingErrors(self):
+ return self.parsing_errors
+
+ def ClearParsingErrors(self):
+ self.parsing_errors = ['Cleared.']
+
+ def DisabledTests(self):
+ return len(self.disabled_tests)
+
+ def FlakyTests(self):
+ return len(self.flaky_tests)
+
+ @staticmethod
+ def RunningTests():
+ return []
+
+ def PrepareJSONFile(self, cmdline_path):
+ if cmdline_path:
+ self.json_file_path = cmdline_path
+ # If the caller requested JSON summary, do not delete it.
+ self.delete_json_file = False
+ else:
+ fd, self.json_file_path = tempfile.mkstemp()
+ os.close(fd)
+ # When we create the file ourselves, delete it to avoid littering.
+ self.delete_json_file = True
+ return self.json_file_path
+
+ def ProcessJSONFile(self, build_dir):
+ if not self.json_file_path:
+ return
+
+ with open(self.json_file_path) as json_file:
+ try:
+ json_output = json_file.read()
+ json_data = json.loads(json_output)
+ except ValueError:
+ # Only signal parsing error if the file is non-empty. Empty file
+ # most likely means the binary doesn't support JSON output.
+ if json_output:
+ self.parsing_errors = json_output.split('\n')
+ else:
+ self.ProcessJSONData(json_data, build_dir)
+
+ if self.delete_json_file:
+ os.remove(self.json_file_path)
+
+ @staticmethod
+ def ParseIgnoredFailedTestSpec(dir_in_chrome):
+ """Returns parsed ignored failed test spec.
+
+ Args:
+      dir_in_chrome: Any directory within the chrome checkout, used as a
+        reference to find the ignored failed test spec file.
+
+ Returns:
+ A list of tuples (test_name, platforms), where platforms is a list of sets
+ of platform flags. For example:
+
+      [('MyTest.TestOne', [set(['OS_WIN', 'CPU_32_BITS', 'MODE_RELEASE']),
+                           set(['OS_LINUX', 'CPU_64_BITS', 'MODE_DEBUG'])]),
+       ('MyTest.TestTwo', [set(['OS_MACOSX', 'CPU_64_BITS', 'MODE_RELEASE']),
+                           set(['CPU_32_BITS'])]),
+       ('MyTest.TestThree', [set()])]
+ """
+
+ try:
+ ignored_failed_tests_path = chromium_utils.FindUpward(
+ os.path.abspath(dir_in_chrome), 'tools', 'ignorer_bot',
+ 'ignored_failed_tests.txt')
+ except chromium_utils.PathNotFound:
+ return
+
+ with open(ignored_failed_tests_path) as ignored_failed_tests_file:
+ ignored_failed_tests_spec = ignored_failed_tests_file.readlines()
+
+ parsed_spec = []
+ for spec_line in ignored_failed_tests_spec:
+ spec_line = spec_line.strip()
+ if spec_line.startswith('#') or not spec_line:
+ continue
+
+ # Any number of platform flags identifiers separated by whitespace.
+ platform_spec_regexp = r'[A-Za-z0-9_\s]*'
+
+ match = re.match(
+ r'^crbug.com/\d+' # Issue URL.
+ r'\s+' # Some whitespace.
+ r'\[(' + # Opening square bracket '['.
+ platform_spec_regexp + # At least one platform, and...
+ r'(?:,' + # ...separated by commas...
+ platform_spec_regexp + # ...any number of additional...
+ r')*' # ...platforms.
+ r')\]' # Closing square bracket ']'.
+ r'\s+' # Some whitespace.
+ r'(\S+)$', spec_line) # Test name.
+
+ if not match:
+ continue
+
+ platform_specs = match.group(1).strip()
+ test_name = match.group(2).strip()
+
+ platforms = [set(platform.split())
+ for platform in platform_specs.split(',')]
+
+ parsed_spec.append((test_name, platforms))
+
+ return parsed_spec
+
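  # A sketch of the spec format accepted above; the bug number, platform flags
  # and test name are hypothetical:
  #
  #   crbug.com/123456 [OS_WIN CPU_32_BITS MODE_RELEASE, OS_LINUX] MyTest.TestOne
  #
  # which would be parsed into:
  #
  #   ('MyTest.TestOne',
  #    [set(['OS_WIN', 'CPU_32_BITS', 'MODE_RELEASE']), set(['OS_LINUX'])])
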
+
+ def _RetrieveIgnoredFailuresForPlatform(self, build_dir, platform_flags):
+ """Parses the ignored failed tests spec into self.ignored_failed_tests."""
+ if not build_dir:
+ return
+
+ platform_flags = set(platform_flags)
+ parsed_spec = self.ParseIgnoredFailedTestSpec(build_dir)
+
+ if not parsed_spec:
+ return
+
+ for test_name, platforms in parsed_spec:
+ for required_platform_flags in platforms:
+ if required_platform_flags.issubset(platform_flags):
+ self.ignored_failed_tests.add(test_name)
+ break
+
+ def ProcessJSONData(self, json_data, build_dir=None):
+ self.disabled_tests.update(json_data['disabled_tests'])
+ self._RetrieveIgnoredFailuresForPlatform(
+ build_dir, json_data['global_tags'])
+
+ for iteration_data in json_data['per_iteration_data']:
+ for test_name, test_runs in iteration_data.iteritems():
+ if test_runs[-1]['status'] == 'SUCCESS':
+ self.passed_tests.add(test_name)
+ else:
+ self.failed_tests.add(test_name)
+
+ self.run_results[test_name] = []
+ self.test_logs.setdefault(test_name, [])
+ for run_index, run_data in enumerate(test_runs, start=1):
+ # Mark as flaky if the run result differs.
+ if run_data['status'] != test_runs[0]['status']:
+ self.flaky_tests.add(test_name)
+ if run_data['status'] in self.SUPPORTED_LABELS:
+ self.run_results[test_name].append(run_data['status'])
+ else:
+ self.run_results[test_name].append(TEST_UNKNOWN_LABEL)
+ run_lines = ['%s (run #%d):' % (test_name, run_index)]
+ # Make sure the annotations are ASCII to avoid character set related
+ # errors. They are mostly informational anyway, and more detailed
+ # info can be obtained from the original JSON output.
+ ascii_lines = run_data['output_snippet'].encode('ascii',
+ errors='replace')
+ decoded_lines = CompressList(
+ ascii_lines.decode('string_escape').split('\n'),
+ self.OUTPUT_SNIPPET_LINES_LIMIT,
+ '<truncated, full output is in gzipped JSON '
+ 'output at end of step>')
+ run_lines.extend(decoded_lines)
+ self.test_logs[test_name].extend(run_lines)
diff --git a/infra/scripts/legacy/scripts/common/url_helper.py b/infra/scripts/legacy/scripts/common/url_helper.py
new file mode 100644
index 0000000..78adc86
--- /dev/null
+++ b/infra/scripts/legacy/scripts/common/url_helper.py
@@ -0,0 +1,60 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import mimetypes
+import urllib2
+
+
+def GetMimeType(filename):
+ return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
+
+
+def EncodeMultipartFormData(fields, files):
+ """Encode form fields for multipart/form-data.
+
+ Args:
+ fields: A sequence of (name, value) elements for regular form fields.
+ files: A sequence of (name, filename, value) elements for data to be
+ uploaded as files.
+ Returns:
+ (content_type, body) ready for httplib.HTTP instance.
+
+ Source:
+ http://code.google.com/p/rietveld/source/browse/trunk/upload.py
+ """
+ BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+ CRLF = '\r\n'
+ lines = []
+
+ for key, value in fields:
+ lines.append('--' + BOUNDARY)
+ lines.append('Content-Disposition: form-data; name="%s"' % key)
+ lines.append('')
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ lines.append(value)
+
+ for key, filename, value in files:
+ lines.append('--' + BOUNDARY)
+ lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
+ (key, filename))
+ lines.append('Content-Type: %s' % GetMimeType(filename))
+ lines.append('')
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ lines.append(value)
+
+ lines.append('--' + BOUNDARY + '--')
+ lines.append('')
+ body = CRLF.join(lines)
+ content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
+ return content_type, body
+
+
+def upload_files(url, attrs, file_objs):
+ content_type, data = EncodeMultipartFormData(attrs, file_objs)
+ headers = {"Content-Type": content_type}
+ request = urllib2.Request(url, data, headers)
+
+ return urllib2.urlopen(request)
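
# A minimal usage sketch of upload_files; the URL, field names and file
# contents are hypothetical.
def _example_upload():
  response = upload_files(
      'https://example.com/upload',
      [('builder', 'Linux Tests')],                 # regular form fields
      [('file', 'results.json', '{"tests": {}}')])  # (name, filename, data)
  return response.getcode()
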
diff --git a/infra/scripts/legacy/scripts/slave/__init__.py b/infra/scripts/legacy/scripts/slave/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/__init__.py
diff --git a/infra/scripts/legacy/scripts/slave/annotation_utils.py b/infra/scripts/legacy/scripts/slave/annotation_utils.py
new file mode 100644
index 0000000..88a77e9
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/annotation_utils.py
@@ -0,0 +1,127 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generates annotated output.
+
+TODO(stip): Move the gtest_utils gtest parser selection code from runtest.py
+to here.
+TODO(stip): Move the perf dashboard code from runtest.py to here.
+"""
+
+import re
+
+from slave import performance_log_processor
+from slave import slave_utils
+
+
+def getText(result, observer, name):
+ """Generate a text summary for the waterfall.
+
+ Updates the waterfall with any unusual test output, with a link to logs of
+ failed test steps.
+ """
+ GTEST_DASHBOARD_BASE = ('http://test-results.appspot.com'
+ '/dashboards/flakiness_dashboard.html')
+
+ # TODO(xusydoc): unify this with gtest reporting below so getText() is
+ # less confusing
+ if hasattr(observer, 'PerformanceSummary'):
+ basic_info = [name]
+ summary_text = ['<div class="BuildResultInfo">']
+ summary_text.extend(observer.PerformanceSummary())
+ summary_text.append('</div>')
+ return basic_info + summary_text
+
+ # basic_info is an array of lines to display on the waterfall.
+ basic_info = [name]
+
+ disabled = observer.DisabledTests()
+ if disabled:
+ basic_info.append('%s disabled' % str(disabled))
+
+ flaky = observer.FlakyTests()
+ if flaky:
+ basic_info.append('%s flaky' % str(flaky))
+
+ failed_test_count = len(observer.FailedTests())
+ if failed_test_count == 0:
+ if result == performance_log_processor.SUCCESS:
+ return basic_info
+ elif result == performance_log_processor.WARNINGS:
+ return basic_info + ['warnings']
+
+ if observer.RunningTests():
+ basic_info += ['did not complete']
+
+ # TODO(xusydoc): see if 'crashed or hung' should be tracked by RunningTests().
+ if failed_test_count:
+ failure_text = ['failed %d' % failed_test_count]
+ if observer.master_name:
+ # Include the link to the flakiness dashboard.
+ failure_text.append('<div class="BuildResultInfo">')
+ failure_text.append('<a href="%s#testType=%s'
+ '&tests=%s">' % (GTEST_DASHBOARD_BASE,
+ name,
+ ','.join(observer.FailedTests())))
+ failure_text.append('Flakiness dashboard')
+ failure_text.append('</a>')
+ failure_text.append('</div>')
+ else:
+ failure_text = ['crashed or hung']
+ return basic_info + failure_text
+
+
+def annotate(test_name, result, log_processor, perf_dashboard_id=None):
+ """Given a test result and tracker, update the waterfall with test results."""
+
+ # Always print raw exit code of the subprocess. This is very helpful
+ # for debugging, especially when one gets the "crashed or hung" message
+ # with no output (exit code can have some clues, especially on Windows).
+ print 'exit code (as seen by runtest.py): %d' % result
+
+ get_text_result = performance_log_processor.SUCCESS
+
+ for failure in sorted(log_processor.FailedTests()):
+ clean_test_name = re.sub(r'[^\w\.\-]', '_', failure)
+ slave_utils.WriteLogLines(clean_test_name,
+ log_processor.FailureDescription(failure))
+ for report_hash in sorted(log_processor.MemoryToolReportHashes()):
+ slave_utils.WriteLogLines(report_hash,
+ log_processor.MemoryToolReport(report_hash))
+
+ if log_processor.ParsingErrors():
+ # Generate a log file containing the list of errors.
+ slave_utils.WriteLogLines('log parsing error(s)',
+ log_processor.ParsingErrors())
+
+ log_processor.ClearParsingErrors()
+
+ if hasattr(log_processor, 'evaluateCommand'):
+ parser_result = log_processor.evaluateCommand('command')
+ if parser_result > result:
+ result = parser_result
+
+ if result == performance_log_processor.SUCCESS:
+ if (len(log_processor.ParsingErrors()) or
+ len(log_processor.FailedTests()) or
+ len(log_processor.MemoryToolReportHashes())):
+ print '@@@STEP_WARNINGS@@@'
+ get_text_result = performance_log_processor.WARNINGS
+ elif result == slave_utils.WARNING_EXIT_CODE:
+ print '@@@STEP_WARNINGS@@@'
+ get_text_result = performance_log_processor.WARNINGS
+ else:
+ print '@@@STEP_FAILURE@@@'
+ get_text_result = performance_log_processor.FAILURE
+
+ for desc in getText(get_text_result, log_processor, test_name):
+ print '@@@STEP_TEXT@%s@@@' % desc
+
+ if hasattr(log_processor, 'PerformanceLogs'):
+ if not perf_dashboard_id:
+      raise Exception('runtest.py error: perf step specified but '
+                      'no test_id in factory_properties!')
+ for logname, log in log_processor.PerformanceLogs().iteritems():
+ lines = [str(l).rstrip() for l in log]
+ slave_utils.WriteLogLines(logname, lines, perf=perf_dashboard_id)
diff --git a/infra/scripts/legacy/scripts/slave/bootstrap.py b/infra/scripts/legacy/scripts/slave/bootstrap.py
new file mode 100644
index 0000000..2bfa4ce
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/bootstrap.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utilities to enable slaves to determine their master without importing any
+buildbot or twisted code.
+"""
+
+import inspect
+import os
+import sys
+
+from common import chromium_utils
+import config_bootstrap
+
+
+def ImportMasterConfigs(master_name=None, include_internal=True):
+ """Imports master configs.
+
+ Normally a slave can use chromium_utils.GetActiveMaster() to find
+ itself and determine which ActiveMaster to use. In that case, the
+ active master name is passed in as an arg, and we only load the
+ site_config.py that defines it. When testing, the current "slave"
+ won't be found. In that case, we don't know which config to use, so
+ load them all. In either case, masters are assigned as attributes
+ to the config.Master object.
+ """
+ for master in chromium_utils.ListMasters(include_internal=include_internal):
+ path = os.path.join(master, 'master_site_config.py')
+ if os.path.exists(path):
+ local_vars = {}
+ try:
+ execfile(path, local_vars)
+ # pylint: disable=W0703
+ except Exception, e:
+ # Naked exceptions are banned by the style guide but we are
+ # trying to be resilient here.
+ print >> sys.stderr, 'WARNING: cannot exec ' + path
+ print >> sys.stderr, e
+ for (symbol_name, symbol) in local_vars.iteritems():
+ if inspect.isclass(symbol):
+ setattr(symbol, 'local_config_path', master)
+ setattr(config_bootstrap.Master, symbol_name, symbol)
+ # If we have a master_name and it matches, set
+ # config_bootstrap.Master.active_master.
+ if master_name and master_name == symbol_name:
+ setattr(config_bootstrap.Master, 'active_master', symbol)
diff --git a/infra/scripts/legacy/scripts/slave/build_directory.py b/infra/scripts/legacy/scripts/slave/build_directory.py
new file mode 100644
index 0000000..d7aa35b
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/build_directory.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions for discovering and clearing the build directory."""
+
+import os
+import sys
+
+
+def IsFileNewerThanFile(file_a, file_b):
+ """Returns True if file_a's mtime is newer than file_b's."""
+ def getmtime(f):
+ try:
+ return os.path.getmtime(f)
+ except os.error:
+ return 0
+ return getmtime(file_a) >= getmtime(file_b)
+
+
+def AreNinjaFilesNewerThanXcodeFiles(src_dir=None):
+ """Returns True if the generated ninja files are newer than the generated
+ xcode files.
+
+ Parameters:
+ src_dir: The path to the src directory. If None, it's assumed to be
+ at src/ relative to the current working directory.
+ """
+ src_dir = src_dir or 'src'
+ ninja_path = os.path.join(src_dir, 'out', 'Release', 'build.ninja')
+ xcode_path = os.path.join(
+ src_dir, 'build', 'all.xcodeproj', 'project.pbxproj')
+ return IsFileNewerThanFile(ninja_path, xcode_path)
+
+
+def AreNinjaFilesNewerThanMSVSFiles(src_dir=None):
+ """Returns True if the generated ninja files are newer than the generated
+ msvs files.
+
+ Parameters:
+ src_dir: The path to the src directory. If None, it's assumed to be
+ at src/ relative to the current working directory.
+ """
+ src_dir = src_dir or 'src'
+ ninja_path = os.path.join(src_dir, 'out', 'Release', 'build.ninja')
+ msvs_path = os.path.join(src_dir, 'build', 'all.sln')
+ return IsFileNewerThanFile(ninja_path, msvs_path)
+
+
+def GetBuildOutputDirectory(src_dir=None, cros_board=None):
+ """Returns the path to the build directory, relative to the checkout root.
+
+ Assumes that the current working directory is the checkout root.
+ """
+  # src_dir is only needed for compiling v8, which uses compile.py (but none
+  # of the other build scripts), but its source root isn't "src" -- crbug.com/315004
+ if src_dir is None:
+ src_dir = 'src'
+
+ if sys.platform.startswith('linux'):
+ out_dirname = 'out'
+ if cros_board:
+ # Simple chrome workflow output (e.g., "out_x86-generic")
+ out_dirname += '_%s' % (cros_board,)
+ return os.path.join(src_dir, out_dirname)
+ assert not cros_board, "'cros_board' not supported on this platform"
+
+ if sys.platform == 'darwin':
+ if AreNinjaFilesNewerThanXcodeFiles(src_dir):
+ return os.path.join(src_dir, 'out')
+ return os.path.join(src_dir, 'xcodebuild')
+
+ if sys.platform == 'cygwin' or sys.platform.startswith('win'):
+ if AreNinjaFilesNewerThanMSVSFiles(src_dir):
+ return os.path.join(src_dir, 'out')
+ return os.path.join(src_dir, 'build')
+
+ raise NotImplementedError('Unexpected platform %s' % sys.platform)
+
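# Illustrative results of GetBuildOutputDirectory (assuming the current
# working directory is the checkout root):
#   on Linux:            GetBuildOutputDirectory()                      -> 'src/out'
#   on Linux with a CrOS board:
#                        GetBuildOutputDirectory(cros_board='x86-generic')
#                                                                       -> 'src/out_x86-generic'
#   on Mac/Windows the result depends on whether the ninja files are newer
#   than the Xcode/MSVS files (see the helpers above).
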
+
+def RmtreeExceptNinjaOrGomaFiles(build_output_dir):
+ """Recursively removes everything but ninja files from a build directory."""
+ for root, _, files in os.walk(build_output_dir, topdown=False):
+ for f in files:
+ # For .manifest in particular, gyp windows ninja generates manifest
+ # files at generation time but clobber nukes at the beginning of
+ # compile, so make sure not to delete those generated files, otherwise
+ # compile will fail.
+ if (f.endswith('.ninja') or f.endswith('.manifest') or
+ f == 'args.gn' or
+ f.startswith('msvc') or # VS runtime DLLs.
+ f.startswith('pgort') or # VS PGO runtime DLL.
+ f in ('gyp-mac-tool', 'gyp-win-tool',
+ 'environment.x86', 'environment.x64')):
+ continue
+ # Keep goma related files.
+ if f == '.goma_deps':
+ continue
+ os.unlink(os.path.join(root, f))
+ # Delete the directory if empty; this works because the walk is bottom-up.
+ try:
+ os.rmdir(root)
+ except OSError, e:
+ if e.errno in (39, 41, 66):
+ # If the directory isn't empty, ignore it.
+        # On Windows, os.rmdir raises WindowsError with winerror 145,
+        # for which e.errno is 41.
+        # On Linux, e.errno is 39.
+ pass
+ else:
+ raise
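+
+
+# Illustrative usage (a sketch; the 'x86-generic' board and the Release
+# subdirectory are example values):
+#
+#   out_dir = GetBuildOutputDirectory()                    # e.g. 'src/out'
+#   cros_out = GetBuildOutputDirectory(cros_board='x86-generic')
+#   # -> 'src/out_x86-generic' on Linux; asserts on other platforms.
+#   RmtreeExceptNinjaOrGomaFiles(os.path.join(out_dir, 'Release'))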
diff --git a/infra/scripts/legacy/scripts/slave/crash_utils.py b/infra/scripts/legacy/scripts/slave/crash_utils.py
new file mode 100755
index 0000000..db9d1bd
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/crash_utils.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A utility for crash reporting."""
+
+import os
+import time
+
+from common import chromium_utils
+
+
+def print_new_crash_files(new_crash_files):
+ """Prints all the new crash files."""
+ if new_crash_files:
+ print '\nFound %d new crash file(s), dumping them:' % (
+ len(new_crash_files))
+ for crash_file in new_crash_files:
+ print 'File: ' + crash_file
+ print '=' * (6 + len(crash_file))
+ for crash_line in open(crash_file):
+ print ' ' + crash_line.rstrip()
+ print ''
+
+
+def list_crash_logs():
+ """List all the crash files stored in the user directory."""
+ reports_dir = os.path.expanduser('~/Library/Logs/DiagnosticReports')
+ result = [x for x in chromium_utils.LocateFiles('*.crash', reports_dir)]
+ return result
+
+
+def wait_for_crash_logs():
+ """Sleeps for a while to allow the crash logs to be written.
+
+  The crash reporter runs asynchronously and out of process, so when a unittest
+  crashes there is no guarantee that the crash log has been written yet. This
+  method is a hack to allow time for the crash logs to be written. Ninety
+  seconds was picked from looking at data on the bots."""
+ # TODO(lakshya): Optimize by polling every 10 seconds for a crash log to be
+ # available instead of waiting for 90 seconds.
+  print ('\nNote: Test finished with non-zero status, sleeping for 90s to '
+ 'allow crash files to be written.')
+ time.sleep(90)
+
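+
+# Illustrative usage (a sketch of how a test runner might use these helpers;
+# the surrounding runner code is hypothetical):
+#
+#   crash_files_before = set(list_crash_logs())
+#   # ... run the test binary; suppose it exits with a non-zero status ...
+#   wait_for_crash_logs()
+#   new_files = [f for f in list_crash_logs() if f not in crash_files_before]
+#   print_new_crash_files(new_files)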
diff --git a/infra/scripts/legacy/scripts/slave/gtest/__init__.py b/infra/scripts/legacy/scripts/slave/gtest/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/gtest/__init__.py
diff --git a/infra/scripts/legacy/scripts/slave/gtest/json_results_generator.py b/infra/scripts/legacy/scripts/slave/gtest/json_results_generator.py
new file mode 100644
index 0000000..cf207b0
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/gtest/json_results_generator.py
@@ -0,0 +1,255 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A utility class to generate JSON results from given test results and upload
+them to the specified results server.
+
+"""
+
+from __future__ import with_statement
+
+import codecs
+import logging
+import os
+import time
+
+import simplejson
+from slave.gtest.test_result import TestResult
+from slave.gtest.test_results_uploader import TestResultsUploader
+
+# A JSON results generator for generic tests.
+
+JSON_PREFIX = 'ADD_RESULTS('
+JSON_SUFFIX = ');'
+
+
+def test_did_pass(test_result):
+ return not test_result.failed and test_result.modifier == TestResult.NONE
+
+
+def add_path_to_trie(path, value, trie):
+ """Inserts a single flat directory path and associated value into a directory
+ trie structure."""
+ if not '/' in path:
+ trie[path] = value
+ return
+
+ # we don't use slash
+ # pylint: disable=W0612
+ directory, slash, rest = path.partition('/')
+ if not directory in trie:
+ trie[directory] = {}
+ add_path_to_trie(rest, value, trie[directory])
+
+
+def generate_test_timings_trie(individual_test_timings):
+ """Breaks a test name into chunks by directory and puts the test time as a
+ value in the lowest part, e.g.
+ foo/bar/baz.html: 1ms
+ foo/bar/baz1.html: 3ms
+
+ becomes
+ foo: {
+ bar: {
+ baz.html: 1,
+ baz1.html: 3
+ }
+ }
+ """
+ trie = {}
+  # Use the timing of the last recorded try of each test.
+ for test_results in individual_test_timings:
+ test = test_results[0].test_name
+
+ add_path_to_trie(test, int(1000 * test_results[-1].test_run_time), trie)
+
+ return trie
+
+
+class JSONResultsGenerator(object):
+ """A JSON results generator for generic tests."""
+
+ FAIL_LABEL = 'FAIL'
+ PASS_LABEL = 'PASS'
+ FLAKY_LABEL = ' '.join([FAIL_LABEL, PASS_LABEL])
+ SKIP_LABEL = 'SKIP'
+
+ ACTUAL = 'actual'
+ BLINK_REVISION = 'blink_revision'
+ BUILD_NUMBER = 'build_number'
+ BUILDER_NAME = 'builder_name'
+ CHROMIUM_REVISION = 'chromium_revision'
+ EXPECTED = 'expected'
+ FAILURE_SUMMARY = 'num_failures_by_type'
+ SECONDS_SINCE_EPOCH = 'seconds_since_epoch'
+ TEST_TIME = 'time'
+ TESTS = 'tests'
+ VERSION = 'version'
+ VERSION_NUMBER = 3
+
+ RESULTS_FILENAME = 'results.json'
+ TIMES_MS_FILENAME = 'times_ms.json'
+ FULL_RESULTS_FILENAME = 'full_results.json'
+
+ def __init__(self, builder_name, build_name, build_number,
+ results_file_base_path, builder_base_url,
+ test_results_map, svn_revisions=None,
+ test_results_server=None,
+ test_type='',
+ master_name='',
+ file_writer=None):
+ """Modifies the results.json file. Grabs it off the archive directory
+ if it is not found locally.
+
+    Args:
+ builder_name: the builder name (e.g. Webkit).
+ build_name: the build name (e.g. webkit-rel).
+ build_number: the build number.
+ results_file_base_path: Absolute path to the directory containing the
+ results json file.
+ builder_base_url: the URL where we have the archived test results.
+ If this is None no archived results will be retrieved.
+ test_results_map: A dictionary that maps test_name to a list of
+ TestResult, one for each time the test was retried.
+ svn_revisions: A (json_field_name, revision) pair for SVN
+ repositories that tests rely on. The SVN revision will be
+ included in the JSON with the given json_field_name.
+ test_results_server: server that hosts test results json.
+ test_type: test type string (e.g. 'layout-tests').
+ master_name: the name of the buildbot master.
+ file_writer: if given the parameter is used to write JSON data to a file.
+ The parameter must be the function that takes two arguments, 'file_path'
+ and 'data' to be written into the file_path.
+ """
+ self._builder_name = builder_name
+ self._build_name = build_name
+ self._build_number = build_number
+ self._builder_base_url = builder_base_url
+ self._results_directory = results_file_base_path
+
+ self._test_results_map = test_results_map
+
+ self._svn_revisions = svn_revisions
+ if not self._svn_revisions:
+ self._svn_revisions = {}
+
+ self._test_results_server = test_results_server
+ self._test_type = test_type
+ self._master_name = master_name
+ self._file_writer = file_writer
+
+ def generate_json_output(self):
+ json = self.get_full_results_json()
+ if json:
+ file_path = os.path.join(self._results_directory,
+ self.FULL_RESULTS_FILENAME)
+ self._write_json(json, file_path)
+
+ def generate_times_ms_file(self):
+ times = generate_test_timings_trie(self._test_results_map.values())
+ file_path = os.path.join(self._results_directory, self.TIMES_MS_FILENAME)
+ self._write_json(times, file_path)
+
+ def get_full_results_json(self):
+ results = {self.VERSION: self.VERSION_NUMBER}
+
+ # Metadata generic to all results.
+ results[self.BUILDER_NAME] = self._builder_name
+ results[self.BUILD_NUMBER] = self._build_number
+ results[self.SECONDS_SINCE_EPOCH] = int(time.time())
+ for name, revision in self._svn_revisions:
+ results[name + '_revision'] = revision
+
+ tests = results.setdefault(self.TESTS, {})
+ for test_name in self._test_results_map.iterkeys():
+ tests[test_name] = self._make_test_data(test_name)
+
+ self._insert_failure_map(results)
+
+ return results
+
+ def _insert_failure_map(self, results):
+    # PASS, FAIL, SKIP
+ summary = {self.PASS_LABEL: 0, self.FAIL_LABEL: 0, self.SKIP_LABEL: 0}
+ for test_results in self._test_results_map.itervalues():
+ # Use the result of the first test for aggregate statistics. This may
+ # count as failing a test that passed on retry, but it's a more useful
+ # statistic and it's consistent with our other test harnesses.
+ test_result = test_results[0]
+ if test_did_pass(test_result):
+ summary[self.PASS_LABEL] += 1
+ elif test_result.modifier == TestResult.DISABLED:
+ summary[self.SKIP_LABEL] += 1
+ elif test_result.failed:
+ summary[self.FAIL_LABEL] += 1
+
+ results[self.FAILURE_SUMMARY] = summary
+
+ def _make_test_data(self, test_name):
+ test_data = {}
+ expected, actual = self._get_expected_and_actual_results(test_name)
+ test_data[self.EXPECTED] = expected
+ test_data[self.ACTUAL] = actual
+ # Use the timing of the first try, it's a better representative since it
+ # runs under more load than retries.
+ run_time = int(self._test_results_map[test_name][0].test_run_time)
+ test_data[self.TEST_TIME] = run_time
+
+ return test_data
+
+ def _get_expected_and_actual_results(self, test_name):
+ test_results = self._test_results_map[test_name]
+ # Use the modifier of the first try, they should all be the same.
+ modifier = test_results[0].modifier
+
+ if modifier == TestResult.DISABLED:
+ return (self.SKIP_LABEL, self.SKIP_LABEL)
+
+ actual_list = []
+ for test_result in test_results:
+ label = self.FAIL_LABEL if test_result.failed else self.PASS_LABEL
+ actual_list.append(label)
+ actual = " ".join(actual_list)
+
+ if modifier == TestResult.NONE:
+ return (self.PASS_LABEL, actual)
+ if modifier == TestResult.FLAKY:
+ return (self.FLAKY_LABEL, actual)
+ if modifier == TestResult.FAILS:
+ return (self.FAIL_LABEL, actual)
+
+ def upload_json_files(self, json_files):
+ """Uploads the given json_files to the test_results_server (if the
+ test_results_server is given)."""
+ if not self._test_results_server:
+ return
+
+ if not self._master_name:
+ logging.error('--test-results-server was set, but --master-name was not. '
+ 'Not uploading JSON files.')
+ return
+
+ print 'Uploading JSON files for builder: %s' % self._builder_name
+ attrs = [('builder', self._builder_name),
+ ('testtype', self._test_type),
+ ('master', self._master_name)]
+
+ files = [(f, os.path.join(self._results_directory, f)) for f in json_files]
+
+ uploader = TestResultsUploader(self._test_results_server)
+ # Set uploading timeout in case appengine server is having problem.
+ # 120 seconds are more than enough to upload test results.
+ uploader.upload(attrs, files, 120)
+
+ print 'JSON files uploaded.'
+
+ def _write_json(self, json_object, file_path):
+ # Specify separators in order to get compact encoding.
+ json_data = simplejson.dumps(json_object, separators=(',', ':'))
+ json_string = json_data
+ if self._file_writer:
+ self._file_writer(file_path, json_string)
+ else:
+ with codecs.open(file_path, 'w', 'utf8') as f:
+ f.write(json_string)
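+
+
+# Illustrative usage (a sketch; the builder and test names, revision and
+# output directory are made-up values):
+#
+#   results_map = {'Suite.MyTest': [TestResult('Suite.MyTest', failed=False)]}
+#   generator = JSONResultsGenerator(
+#       'Linux Tests', 'linux-rel', 123, '/tmp/results', None,
+#       results_map,
+#       svn_revisions=[('chromium', '12345')],
+#       test_results_server='',  # empty: upload_json_files() is a no-op
+#       test_type='unit-tests')
+#   generator.generate_json_output()     # writes full_results.json
+#   generator.generate_times_ms_file()   # writes times_ms.json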
diff --git a/infra/scripts/legacy/scripts/slave/gtest/networktransaction.py b/infra/scripts/legacy/scripts/slave/gtest/networktransaction.py
new file mode 100644
index 0000000..10327cb
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/gtest/networktransaction.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import time
+import urllib2
+
+
+class NetworkTimeout(Exception):
+ pass
+
+
+class NetworkTransaction(object):
+ def __init__(self, initial_backoff_seconds=10, grown_factor=1.5,
+ timeout_seconds=(10 * 60), convert_404_to_None=False):
+ self._initial_backoff_seconds = initial_backoff_seconds
+ self._backoff_seconds = initial_backoff_seconds
+ self._grown_factor = grown_factor
+ self._timeout_seconds = timeout_seconds
+ self._convert_404_to_None = convert_404_to_None
+ self._total_sleep = 0
+
+ def run(self, request):
+ self._total_sleep = 0
+ self._backoff_seconds = self._initial_backoff_seconds
+ while True:
+ try:
+ return request()
+ except urllib2.HTTPError, e:
+ if self._convert_404_to_None and e.code == 404:
+ return None
+ self._check_for_timeout()
+ logging.warn("Received HTTP status %s loading \"%s\". "
+ "Retrying in %s seconds...",
+ e.code, e.filename, self._backoff_seconds)
+ self._sleep()
+
+ def _check_for_timeout(self):
+ if self._total_sleep + self._backoff_seconds > self._timeout_seconds:
+ raise NetworkTimeout()
+
+ def _sleep(self):
+ time.sleep(self._backoff_seconds)
+ self._total_sleep += self._backoff_seconds
+ self._backoff_seconds *= self._grown_factor
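+
+
+# Illustrative usage (a sketch; the URL is a placeholder):
+#
+#   transaction = NetworkTransaction(convert_404_to_None=True)
+#   body = transaction.run(
+#       lambda: urllib2.urlopen('http://example.com/results').read())
+#
+# On HTTPError the request is retried with an exponentially growing backoff
+# (10s, 15s, 22.5s, ...); NetworkTimeout is raised once the next backoff would
+# push the total sleep time past timeout_seconds.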
diff --git a/infra/scripts/legacy/scripts/slave/gtest/test_result.py b/infra/scripts/legacy/scripts/slave/gtest/test_result.py
new file mode 100644
index 0000000..3efd37d
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/gtest/test_result.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+def canonical_name(name):
+ new_name = name.replace('FLAKY_', '', 1)
+ new_name = new_name.replace('FAILS_', '', 1)
+ new_name = new_name.replace('DISABLED_', '', 1)
+ return new_name
+
+
+class TestResult(object):
+ """A simple class that represents a single test result."""
+
+ # Test modifier constants.
+ (NONE, FAILS, FLAKY, DISABLED) = range(4)
+
+ def __init__(self, test, failed, not_run=False, elapsed_time=0):
+ self.test_name = canonical_name(test)
+ self.failed = failed
+ self.test_run_time = elapsed_time
+
+ test_name = test
+ try:
+ test_name = test.split('.')[1]
+ except IndexError:
+ pass
+
+ if not_run:
+ self.modifier = self.DISABLED
+ elif test_name.startswith('FAILS_'):
+ self.modifier = self.FAILS
+ elif test_name.startswith('FLAKY_'):
+ self.modifier = self.FLAKY
+ elif test_name.startswith('DISABLED_'):
+ self.modifier = self.DISABLED
+ else:
+ self.modifier = self.NONE
+
+ def fixable(self):
+ return self.failed or self.modifier == self.DISABLED
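+
+
+# Illustrative examples of how test name prefixes map to modifiers (a sketch;
+# the suite and test names are made up):
+#
+#   TestResult('MySuite.FLAKY_Foo', failed=True)
+#     -> test_name 'MySuite.Foo', modifier FLAKY
+#   TestResult('MySuite.DISABLED_Bar', failed=False, not_run=True)
+#     -> test_name 'MySuite.Bar', modifier DISABLED
+#   TestResult('MySuite.Baz', failed=False)
+#     -> test_name 'MySuite.Baz', modifier NONE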
diff --git a/infra/scripts/legacy/scripts/slave/gtest/test_results_uploader.py b/infra/scripts/legacy/scripts/slave/gtest/test_results_uploader.py
new file mode 100644
index 0000000..ead70fb
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/gtest/test_results_uploader.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import codecs
+import socket
+
+from common import url_helper
+from slave.gtest.networktransaction import NetworkTransaction
+
+
+class TestResultsUploader(object):
+ def __init__(self, host):
+ self._host = host
+
+ def _upload_files(self, attrs, file_objs):
+ url = "http://%s/testfile/upload" % self._host
+ url_helper.upload_files(url, attrs, file_objs)
+
+ def upload(self, params, files, timeout_seconds):
+ file_objs = []
+ for filename, path in files:
+ with codecs.open(path, "rb") as f:
+ file_objs.append(('file', filename, f.read()))
+
+ orig_timeout = socket.getdefaulttimeout()
+ try:
+ socket.setdefaulttimeout(timeout_seconds)
+ NetworkTransaction(timeout_seconds=timeout_seconds).run(
+ lambda: self._upload_files(params, file_objs))
+ finally:
+ socket.setdefaulttimeout(orig_timeout)
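+
+
+# Illustrative usage (a sketch; the host, attributes and file paths are
+# placeholder values):
+#
+#   uploader = TestResultsUploader('test-results.example.com')
+#   uploader.upload([('builder', 'Linux Tests'), ('testtype', 'unit-tests')],
+#                   [('full_results.json', '/tmp/full_results.json')],
+#                   timeout_seconds=120)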
diff --git a/infra/scripts/legacy/scripts/slave/gtest_slave_utils.py b/infra/scripts/legacy/scripts/slave/gtest_slave_utils.py
new file mode 100755
index 0000000..55fe843
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/gtest_slave_utils.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import optparse
+import os
+import re
+import sys
+
+from common import gtest_utils
+from xml.dom import minidom
+from slave.gtest.json_results_generator import JSONResultsGenerator
+from slave.gtest.test_result import canonical_name
+from slave.gtest.test_result import TestResult
+
+
+GENERATE_JSON_RESULTS_OPTIONS = [
+ 'builder_name', 'build_name', 'build_number', 'results_directory',
+ 'builder_base_url', 'webkit_revision', 'chrome_revision',
+ 'test_results_server', 'test_type', 'master_name']
+
+FULL_RESULTS_FILENAME = 'full_results.json'
+TIMES_MS_FILENAME = 'times_ms.json'
+
+
+# Note: GTestUnexpectedDeathTracker is being deprecated in favor of
+# common.gtest_utils.GTestLogParser. See scripts/slave/runtest.py for details.
+class GTestUnexpectedDeathTracker(object):
+ """A lightweight version of log parser that keeps track of running tests
+ for unexpected timeout or crash."""
+
+ def __init__(self):
+ self._current_test = None
+ self._completed = False
+ self._test_start = re.compile(r'\[\s+RUN\s+\] (\w+\.\w+)')
+ self._test_ok = re.compile(r'\[\s+OK\s+\] (\w+\.\w+)')
+ self._test_fail = re.compile(r'\[\s+FAILED\s+\] (\w+\.\w+)')
+ self._test_passed = re.compile(r'\[\s+PASSED\s+\] \d+ tests?.')
+
+ self._failed_tests = set()
+
+ def OnReceiveLine(self, line):
+ results = self._test_start.search(line)
+ if results:
+ self._current_test = results.group(1)
+ return
+
+ results = self._test_ok.search(line)
+ if results:
+ self._current_test = ''
+ return
+
+ results = self._test_fail.search(line)
+ if results:
+ self._failed_tests.add(results.group(1))
+ self._current_test = ''
+ return
+
+ results = self._test_passed.search(line)
+ if results:
+ self._completed = True
+ self._current_test = ''
+ return
+
+ def GetResultsMap(self):
+ """Returns a map of TestResults."""
+
+ if self._current_test:
+ self._failed_tests.add(self._current_test)
+
+ test_results_map = dict()
+ for test in self._failed_tests:
+ test_results_map[canonical_name(test)] = [TestResult(test, failed=True)]
+
+ return test_results_map
+
+ def CompletedWithoutFailure(self):
+ """Returns True if all tests completed and no tests failed unexpectedly."""
+
+ if not self._completed:
+ return False
+
+ for test in self._failed_tests:
+ test_modifier = TestResult(test, failed=True).modifier
+ if test_modifier not in (TestResult.FAILS, TestResult.FLAKY):
+ return False
+
+ return True
+
+
+def GetResultsMap(observer):
+ """Returns a map of TestResults."""
+
+ test_results_map = dict()
+ tests = (observer.FailedTests(include_fails=True, include_flaky=True) +
+ observer.PassedTests())
+ for test in tests:
+ key = canonical_name(test)
+ test_results_map[key] = []
+ tries = observer.TriesForTest(test)
+ for test_try in tries:
+ # FIXME: Store the actual failure type so we can expose whether the test
+ # crashed or timed out. See crbug.com/249965.
+ failed = (test_try != gtest_utils.TEST_SUCCESS_LABEL)
+ test_results_map[key].append(TestResult(test, failed=failed))
+
+ return test_results_map
+
+
+def GetResultsMapFromXML(results_xml):
+ """Parse the given results XML file and returns a map of TestResults."""
+
+ results_xml_file = None
+ try:
+ results_xml_file = open(results_xml)
+ except IOError:
+ logging.error('Cannot open file %s', results_xml)
+ return dict()
+ node = minidom.parse(results_xml_file).documentElement
+ results_xml_file.close()
+
+ test_results_map = dict()
+ testcases = node.getElementsByTagName('testcase')
+
+ for testcase in testcases:
+ name = testcase.getAttribute('name')
+ classname = testcase.getAttribute('classname')
+ test_name = '%s.%s' % (classname, name)
+
+ failures = testcase.getElementsByTagName('failure')
+ not_run = testcase.getAttribute('status') == 'notrun'
+ elapsed = float(testcase.getAttribute('time'))
+ result = TestResult(test_name,
+ failed=bool(failures),
+ not_run=not_run,
+ elapsed_time=elapsed)
+ test_results_map[canonical_name(test_name)] = [result]
+
+ return test_results_map
+
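+# GetResultsMapFromXML expects gtest-style results XML; a minimal sketch of the
+# shape it reads (illustrative only):
+#
+#   <testsuites>
+#     <testsuite name="MySuite">
+#       <testcase name="Foo" classname="MySuite" status="run" time="0.01"/>
+#       <testcase name="Bar" classname="MySuite" status="run" time="0.02">
+#         <failure message="Value mismatch"/>
+#       </testcase>
+#     </testsuite>
+#   </testsuites>
+#
+# This yields {'MySuite.Foo': [TestResult(..., failed=False)],
+#              'MySuite.Bar': [TestResult(..., failed=True)]}.
+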
+
+def GenerateJSONResults(test_results_map, options):
+ """Generates a JSON results file from the given test_results_map,
+ returning the associated generator for use with UploadJSONResults, below.
+
+ Args:
+ test_results_map: A map of TestResult.
+ options: options for json generation. See GENERATE_JSON_RESULTS_OPTIONS
+ and OptionParser's help messages below for expected options and their
+ details.
+ """
+
+ if not test_results_map:
+ logging.warn('No input results map was given.')
+ return
+
+ # Make sure we have all the required options (set empty string otherwise).
+ for opt in GENERATE_JSON_RESULTS_OPTIONS:
+ if not getattr(options, opt, None):
+ logging.warn('No value is given for option %s', opt)
+ setattr(options, opt, '')
+
+ try:
+ int(options.build_number)
+ except ValueError:
+ logging.error('options.build_number needs to be a number: %s',
+ options.build_number)
+ return
+
+ if not os.path.exists(options.results_directory):
+ os.makedirs(options.results_directory)
+
+ print('Generating json: '
+ 'builder_name:%s, build_name:%s, build_number:%s, '
+ 'results_directory:%s, builder_base_url:%s, '
+        'webkit_revision:%s, chrome_revision:%s, '
+ 'test_results_server:%s, test_type:%s, master_name:%s' %
+ (options.builder_name, options.build_name, options.build_number,
+ options.results_directory, options.builder_base_url,
+ options.webkit_revision, options.chrome_revision,
+ options.test_results_server, options.test_type,
+ options.master_name))
+
+ generator = JSONResultsGenerator(
+ options.builder_name, options.build_name, options.build_number,
+ options.results_directory, options.builder_base_url,
+ test_results_map,
+ svn_revisions=(('blink', options.webkit_revision),
+ ('chromium', options.chrome_revision)),
+ test_results_server=options.test_results_server,
+ test_type=options.test_type,
+ master_name=options.master_name)
+ generator.generate_json_output()
+ generator.generate_times_ms_file()
+ return generator
+
+def UploadJSONResults(generator):
+ """Conditionally uploads the results from GenerateJSONResults if
+ test_results_server was given."""
+ if generator:
+ generator.upload_json_files([FULL_RESULTS_FILENAME,
+ TIMES_MS_FILENAME])
+
+# For command-line testing.
+def main():
+ # Builder base URL where we have the archived test results.
+ # (Note: to be deprecated)
+ BUILDER_BASE_URL = 'http://build.chromium.org/buildbot/gtest_results/'
+
+ option_parser = optparse.OptionParser()
+ option_parser.add_option('', '--test-type', default='',
+ help='Test type that generated the results XML,'
+ ' e.g. unit-tests.')
+ option_parser.add_option('', '--results-directory', default='./',
+                           help='Output results directory. Default is ./')
+ option_parser.add_option('', '--input-results-xml', default='',
+ help='Test results xml file (input for us).'
+ ' default is TEST_TYPE.xml')
+ option_parser.add_option('', '--builder-base-url', default='',
+ help=('A URL where we have the archived test '
+ 'results. (default=%sTEST_TYPE_results/)'
+ % BUILDER_BASE_URL))
+ option_parser.add_option('', '--builder-name',
+ default='DUMMY_BUILDER_NAME',
+ help='The name of the builder shown on the '
+ 'waterfall running this script e.g. WebKit.')
+ option_parser.add_option('', '--build-name',
+ default='DUMMY_BUILD_NAME',
+ help='The name of the builder used in its path, '
+ 'e.g. webkit-rel.')
+ option_parser.add_option('', '--build-number', default='',
+                           help='The build number of the builder running '
+ 'this script.')
+ option_parser.add_option('', '--test-results-server',
+ default='',
+ help='The test results server to upload the '
+ 'results.')
+ option_parser.add_option('--master-name', default='',
+ help='The name of the buildbot master. '
+ 'Both test-results-server and master-name '
+ 'need to be specified to upload the results '
+ 'to the server.')
+ option_parser.add_option('--webkit-revision', default='0',
+ help='The WebKit revision being tested. If not '
+ 'given, defaults to 0.')
+ option_parser.add_option('--chrome-revision', default='0',
+ help='The Chromium revision being tested. If not '
+ 'given, defaults to 0.')
+
+ options = option_parser.parse_args()[0]
+
+ if not options.test_type:
+ logging.error('--test-type needs to be specified.')
+ sys.exit(1)
+
+ if not options.input_results_xml:
+ logging.error('--input-results-xml needs to be specified.')
+ sys.exit(1)
+
+ if options.test_results_server and not options.master_name:
+ logging.warn('--test-results-server is given but '
+ '--master-name is not specified; the results won\'t be '
+ 'uploaded to the server.')
+
+ results_map = GetResultsMapFromXML(options.input_results_xml)
+ generator = GenerateJSONResults(results_map, options)
+ UploadJSONResults(generator)
+
+
+if '__main__' == __name__:
+ main()
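+
+# Illustrative command line (a sketch; the file, builder and directory names
+# are placeholder values):
+#
+#   gtest_slave_utils.py --test-type unit-tests \
+#       --input-results-xml unit-tests.xml \
+#       --builder-name "Linux Tests" --build-name linux-rel \
+#       --build-number 123 --results-directory /tmp/results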
diff --git a/infra/scripts/legacy/scripts/slave/performance_log_processor.py b/infra/scripts/legacy/scripts/slave/performance_log_processor.py
new file mode 100644
index 0000000..db110f3
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/performance_log_processor.py
@@ -0,0 +1,844 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module contains PerformanceLogProcessor and subclasses.
+
+Several performance tests have complicated log output, and this module is
+intended to help buildsteps parse these logs and identify whether tests had
+anomalies.
+
+The classes in this file all have the same method ProcessLine, just like
+GTestLogParser in //tools/build/scripts/common/gtest_utils.py. They also
+construct a set of files which are used for graphing.
+
+Note: This module is doomed to be deprecated in the future, as Telemetry
+results will be passed more directly to the new performance dashboard.
+"""
+
+import collections
+import json
+import logging
+import os
+import re
+
+from common import chromium_utils
+
+# TODO(crbug.com/403564).
+import config
+
+# Status codes that can be returned by the evaluateCommand method.
+# From buildbot.status.builder.
+# See: http://docs.buildbot.net/current/developer/results.html
+SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)
+
+
+class PerformanceLogProcessor(object):
+ """Parent class for performance log parsers.
+
+ The only essential public method that subclasses must define is the method
+ ProcessLine, which takes one line of a test output log and uses it
+ to change the internal state of the PerformanceLogProcessor object,
+ so that methods such as PerformanceLogs return the right thing.
+ """
+
+ # The file perf_expectations.json holds performance expectations.
+ # For more info, see: http://goo.gl/BhYvDa
+ PERF_EXPECTATIONS_PATH = 'src/tools/perf_expectations/'
+
+ def __init__(self, revision=None, factory_properties=None,
+ build_properties=None, webkit_revision='undefined'):
+ """Initializes the log processor.
+
+ Args:
+ revision: Revision number; this currently could also be a git number.
+ It is sent to the perf dashboard to be used as the x-value.
+ factory_properties: Factory properties dict.
+ build_properties: Build properties dict.
+ webkit_revision: Blink revision number.
+ """
+ if factory_properties is None:
+ factory_properties = {}
+
+ # Performance regression/speedup alerts.
+ self._read_expectations = False
+
+ self._perf_id = factory_properties.get('perf_id')
+ self._perf_name = factory_properties.get('perf_name')
+ self._perf_filename = factory_properties.get('perf_filename')
+ self._test_name = factory_properties.get('test_name')
+
+ self._perf_data = {}
+ self._perf_test_keys = {}
+ self._perf_ref_keys = {}
+ self._perf_regress = []
+ self._perf_improve = []
+
+ # A dict mapping output file names to lists of lines in a file.
+ self._output = {}
+
+ # Whether or not the processing has been finalized (i.e. whether
+ # self._FinalizeProcessing has been called.)
+ self._finalized = False
+
+ # The text summary will be built by other methods as we go.
+ # This is a list of strings with messages about the processing.
+ self._text_summary = []
+
+ # Enable expectations if the local configuration supports it.
+ self._expectations = (factory_properties.get('expectations')
+ and self._perf_id and self._perf_name)
+ if self._expectations and not self._perf_filename:
+ self._perf_filename = os.path.join(self.PERF_EXPECTATIONS_PATH,
+ 'perf_expectations.json')
+
+ if revision:
+ self._revision = revision
+ else:
+ raise ValueError('Must provide a revision to PerformanceLogProcessor.')
+
+ self._webkit_revision = webkit_revision
+
+ if not build_properties:
+ build_properties = {}
+ self._git_revision = build_properties.get('git_revision', 'undefined')
+ self._version = build_properties.get('version', 'undefined')
+ self._channel = build_properties.get('channel', 'undefined')
+ self._webrtc_revision = build_properties.get('got_webrtc_revision',
+ 'undefined')
+
+ self._v8_revision = 'undefined'
+ if factory_properties.get('show_v8_revision'):
+ self._v8_revision = build_properties.get('got_v8_revision', 'undefined')
+
+ self._percentiles = [.1, .25, .5, .75, .90, .95, .99]
+
+ def IsChartJson(self): # pylint: disable=R0201
+ """This is not the new telemetry --chartjson output format."""
+ return False
+
+ def PerformanceLogs(self):
+ if not self._finalized:
+ self._FinalizeProcessing()
+ self._finalized = True
+ return self._output
+
+ def PerformanceSummary(self):
+ """Returns a list of strings about performance changes and other info."""
+ if not self._finalized:
+ self._FinalizeProcessing()
+ self._finalized = True
+ return self.PerformanceChanges() + self._text_summary
+
+ def _FinalizeProcessing(self):
+ """Hook for subclasses to do final operations before output is returned."""
+ # This method is to be defined by inheriting classes.
+ pass
+
+ def AppendLog(self, filename, data):
+ """Appends some data to an output file."""
+ self._output[filename] = self._output.get(filename, []) + data
+
+ def PrependLog(self, filename, data):
+ """Prepends some data to an output file."""
+ self._output[filename] = data + self._output.get(filename, [])
+
+ def FailedTests(self): # pylint: disable=R0201
+ return []
+
+ def MemoryToolReportHashes(self): # pylint: disable=R0201
+ return []
+
+ def ParsingErrors(self): # pylint: disable=R0201
+ return []
+
+ def LoadPerformanceExpectationsData(self, all_perf_data):
+ """Load the expectations data.
+
+ All keys in perf_expectations have 4 components:
+ slave/test/graph/trace
+
+ LoadPerformanceExpectationsData finds all keys that match the initial
+ portion of the string ("slave/test") and adds the graph and result
+ portions to the expected performance structure.
+ """
+
+ for perf_key in all_perf_data.keys():
+ # tools/perf_expectations/tests/perf_expectations_unittest.py should have
+ # a matching regular expression.
+ m = re.search(r'^' + self._perf_name + '/' + self._test_name +
+ r'/([\w\.-]+)/([\w\.-]+)$', perf_key)
+ if not m:
+ continue
+
+ perf_data = all_perf_data[perf_key]
+ graph = m.group(1)
+ trace = m.group(2)
+
+ # By default, all perf data is type=relative.
+ perf_data.setdefault('type', 'relative')
+
+      # By default, relative perf data is compared against the fqtn+'_ref'.
+ if perf_data['type'] == 'relative' and 'ref' not in perf_data:
+ perf_data['ref'] = '%s/%s/%s/%s_ref' % (
+ self._perf_name, self._test_name, graph, trace)
+
+ # For each test key, we add a reference in _perf_test_keys to perf_data.
+ self._perf_test_keys.setdefault(perf_key, [])
+ self._perf_test_keys[perf_key].append(perf_data)
+
+ # For each ref key, we add a reference in _perf_ref_keys to perf_data.
+ if 'ref' in perf_data:
+ self._perf_ref_keys.setdefault(perf_data['ref'], [])
+ self._perf_ref_keys[perf_data['ref']].append(perf_data)
+
+ self._perf_data.setdefault(graph, {})
+ self._perf_data[graph][trace] = perf_data
+
+ def LoadPerformanceExpectations(self):
+ if not self._expectations:
+ # self._expectations is false when a given factory doesn't enable
+ # expectations, or doesn't have both perf_id and perf_name set.
+ return
+ try:
+ perf_file = open(self._perf_filename, 'r')
+ except IOError, e:
+ logging.error('I/O Error reading expectations %s(%s): %s' %
+ (self._perf_filename, e.errno, e.strerror))
+ return
+
+ perf_data = {}
+ if perf_file:
+ try:
+ perf_data = json.load(perf_file)
+ except ValueError:
+ perf_file.seek(0)
+ logging.error('Error parsing expectations %s: \'%s\'' %
+ (self._perf_filename, perf_file.read().strip()))
+ perf_file.close()
+
+ # Find this perf/test entry
+ if perf_data and perf_data.has_key('load') and perf_data['load']:
+ self.LoadPerformanceExpectationsData(perf_data)
+ else:
+ logging.error('not loading perf expectations: perf_data is disabled')
+ self._read_expectations = True
+
+ def TrackActualPerformance(self, graph=None, trace=None, value=None,
+ stddev=None):
+ """Set actual performance data when we come across useful values.
+
+ trace will be of the form "RESULTTYPE" or "RESULTTYPE_ref".
+ A trace with _ref in its name refers to a reference build.
+
+ Common result types for page cyclers: t, vm_rss_f_r, IO_b_b, etc.
+ A test's result types vary between test types. Currently, a test
+ only needs to output the appropriate text format to embed a new
+ result type.
+ """
+
+ fqtn = '%s/%s/%s/%s' % (self._perf_name, self._test_name, graph, trace)
+ if fqtn in self._perf_test_keys:
+ for perf_data in self._perf_test_keys[fqtn]:
+ perf_data['actual_test'] = value
+ perf_data['actual_var'] = stddev
+
+ if perf_data['type'] == 'absolute' and 'actual_test' in perf_data:
+ perf_data['actual_delta'] = perf_data['actual_test']
+
+ elif perf_data['type'] == 'relative':
+ if 'actual_test' in perf_data and 'actual_ref' in perf_data:
+ perf_data['actual_delta'] = (
+ perf_data['actual_test'] - perf_data['actual_ref'])
+
+ if fqtn in self._perf_ref_keys:
+ for perf_data in self._perf_ref_keys[fqtn]:
+ perf_data['actual_ref'] = value
+
+ if 'actual_test' in perf_data and 'actual_ref' in perf_data:
+ perf_data['actual_delta'] = (
+ perf_data['actual_test'] - perf_data['actual_ref'])
+
+ def PerformanceChangesAsText(self):
+ """Returns a list of strings which describe performance changes."""
+
+ text = []
+
+ if self._expectations and not self._read_expectations:
+ text.append('MISS_EXPECTATIONS')
+
+ if self._perf_regress:
+ text.append('PERF_REGRESS: ' + ', '.join(self._perf_regress))
+
+ if self._perf_improve:
+ text.append('PERF_IMPROVE: ' + ', '.join(self._perf_improve))
+
+ return text
+
+ def ComparePerformance(self, graph, trace):
+ """Populates internal data about improvements and regressions."""
+ # Skip graphs and traces we don't expect values for.
+ if not graph in self._perf_data or not trace in self._perf_data[graph]:
+ return
+
+ perfdata = self._perf_data[graph][trace]
+ graph_result = graph + '/' + trace
+
+ # Skip result types that didn't calculate a delta.
+ if not 'actual_delta' in perfdata:
+ return
+
+ # Skip result types that don't have regress/improve values.
+ if 'regress' not in perfdata or 'improve' not in perfdata:
+ return
+
+    # Set the high and low thresholds.
+ # The actual delta needs to be within this range to keep the perf test
+ # green. If the results fall above or below this range, the test will go
+ # red (signaling a regression) or orange (signaling a speedup).
+ actual = perfdata['actual_delta']
+ regress = perfdata['regress']
+ improve = perfdata['improve']
+ if (('better' in perfdata and perfdata['better'] == 'lower') or
+ ('better' not in perfdata and regress > improve)):
+ # The "lower is better" case. (ie. time results)
+ if actual < improve:
+ ratio = 1 - _Divide(actual, improve)
+ self._perf_improve.append('%s (%s)' % (graph_result,
+ _FormatPercentage(ratio)))
+ elif actual > regress:
+ ratio = _Divide(actual, regress) - 1
+ self._perf_regress.append('%s (%s)' % (graph_result,
+ _FormatPercentage(ratio)))
+ else:
+ # The "higher is better" case. (ie. score results)
+ if actual > improve:
+ ratio = _Divide(actual, improve) - 1
+ self._perf_improve.append('%s (%s)' % (graph_result,
+ _FormatPercentage(ratio)))
+ elif actual < regress:
+ ratio = 1 - _Divide(actual, regress)
+ self._perf_regress.append('%s (%s)' % (graph_result,
+ _FormatPercentage(ratio)))
+
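+  # Worked example for the "lower is better" branch above (illustrative
+  # numbers only): with regress=2.0, improve=1.5 and actual_delta=2.5,
+  # actual > regress, so ratio = 2.5/2.0 - 1 = 0.25 and the entry
+  # 'graph/trace (25.00%)' is appended to self._perf_regress.
+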
+ def PerformanceChanges(self):
+ """Compares actual and expected results.
+
+ Returns:
+ A list of strings indicating improvements or regressions.
+ """
+ # Compare actual and expected results.
+ for graph in self._perf_data:
+ for trace in self._perf_data[graph]:
+ self.ComparePerformance(graph, trace)
+
+ return self.PerformanceChangesAsText()
+
+ # Unused argument cmd.
+ # pylint: disable=W0613
+ def evaluateCommand(self, cmd):
+ """Returns a status code indicating success, failure, etc.
+
+ See: http://docs.buildbot.net/current/developer/cls-buildsteps.html
+
+ Args:
+ cmd: A command object. Not used here.
+
+ Returns:
+ A status code (One of SUCCESS, WARNINGS, FAILURE, etc.)
+ """
+ if self._expectations and not self._read_expectations:
+ return WARNINGS
+
+ # make sure regression and improvement logs are calculated
+ self.PerformanceSummary()
+
+ if self._perf_regress:
+ return FAILURE
+
+ if self._perf_improve:
+ return WARNINGS
+
+ # There was no change in performance, report success.
+ return SUCCESS
+
+ def ProcessLine(self, line):
+ """Process one line of a log file."""
+ # This method must be overridden by subclass
+ pass
+
+
+class GraphingLogProcessor(PerformanceLogProcessor):
+ """Parent class for any log processor expecting standard data to be graphed.
+
+ The log will be parsed looking for any lines of the forms:
+ <*>RESULT <graph_name>: <trace_name>= <value> <units>
+ or
+ <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...] <units>
+ or
+ <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>} <units>
+
+ For example,
+ *RESULT vm_final_browser: OneTab= 8488 kb
+ RESULT startup: ref= [167.00,148.00,146.00,142.00] ms
+ RESULT TabCapturePerformance_foo: Capture= {30.7, 1.45} ms
+
+ The leading * is optional; it indicates that the data from that line should
+ be considered "important", which may mean for example that it's graphed by
+ default.
+
+ If multiple values are given in [], their mean and (sample) standard
+ deviation will be written; if only one value is given, that will be written.
+ A trailing comma is permitted in the list of values.
+
+ NOTE: All lines except for RESULT lines are ignored, including the Avg and
+ Stddev lines output by Telemetry!
+
+ Any of the <fields> except <value> may be empty, in which case the
+ not-terribly-useful defaults will be used. The <graph_name> and <trace_name>
+ should not contain any spaces, colons (:) nor equals-signs (=). Furthermore,
+ the <trace_name> will be used on the waterfall display, so it should be kept
+ short. If the trace_name ends with '_ref', it will be interpreted as a
+ reference value, and shown alongside the corresponding main value on the
+ waterfall.
+
+ Semantic note: The terms graph and chart are used interchangeably here.
+ """
+
+ # The file into which the GraphingLogProcessor will save a list of graph
+ # names for use by the JS doing the plotting.
+ GRAPH_LIST = config.Master.perf_graph_list
+
+ RESULTS_REGEX = re.compile(r'(?P<IMPORTANT>\*)?RESULT '
+ r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
+ r'(?P<VALUE>[\{\[]?[-\d\., ]+[\}\]]?)('
+ r' ?(?P<UNITS>.+))?')
+ HISTOGRAM_REGEX = re.compile(r'(?P<IMPORTANT>\*)?HISTOGRAM '
+ r'(?P<GRAPH>[^:]*): (?P<TRACE>[^=]*)= '
+ r'(?P<VALUE_JSON>{.*})(?P<UNITS>.+)?')
+
+ class Trace(object):
+ """Encapsulates data for one trace. Here, this means one point."""
+
+ def __init__(self):
+ self.important = False
+ self.value = 0.0
+ self.stddev = 0.0
+
+ def __str__(self):
+ result = _FormatHumanReadable(self.value)
+ if self.stddev:
+ result += '+/-%s' % _FormatHumanReadable(self.stddev)
+ return result
+
+ class Graph(object):
+ """Encapsulates a set of points that should appear on the same graph."""
+
+ def __init__(self):
+ self.units = None
+ self.traces = {}
+
+ def IsImportant(self):
+ """A graph is considered important if any of its traces is important."""
+ for trace in self.traces.itervalues():
+ if trace.important:
+ return True
+ return False
+
+ def BuildTracesDict(self):
+ """Returns a dictionary mapping trace names to [value, stddev]."""
+ traces_dict = {}
+ for name, trace in self.traces.items():
+ traces_dict[name] = [str(trace.value), str(trace.stddev)]
+ return traces_dict
+
+ def __init__(self, *args, **kwargs):
+ """Initiates this log processor."""
+ PerformanceLogProcessor.__init__(self, *args, **kwargs)
+
+ # A dict of Graph objects, by name.
+ self._graphs = {}
+
+ # Load performance expectations for this test.
+ self.LoadPerformanceExpectations()
+
+ def ProcessLine(self, line):
+ """Processes one result line, and updates the state accordingly."""
+ results_match = self.RESULTS_REGEX.search(line)
+ histogram_match = self.HISTOGRAM_REGEX.search(line)
+ if results_match:
+ self._ProcessResultLine(results_match)
+ elif histogram_match:
+ self._ProcessHistogramLine(histogram_match)
+
+ def _ProcessResultLine(self, line_match):
+ """Processes a line that matches the standard RESULT line format.
+
+ Args:
+ line_match: A MatchObject as returned by re.search.
+ """
+ match_dict = line_match.groupdict()
+ graph_name = match_dict['GRAPH'].strip()
+ trace_name = match_dict['TRACE'].strip()
+
+ graph = self._graphs.get(graph_name, self.Graph())
+ graph.units = match_dict['UNITS'] or ''
+ trace = graph.traces.get(trace_name, self.Trace())
+ trace.value = match_dict['VALUE']
+ trace.important = match_dict['IMPORTANT'] or False
+
+ # Compute the mean and standard deviation for a multiple-valued item,
+ # or the numerical value of a single-valued item.
+ if trace.value.startswith('['):
+ try:
+ value_list = [float(x) for x in trace.value.strip('[],').split(',')]
+ except ValueError:
+ # Report, but ignore, corrupted data lines. (Lines that are so badly
+ # broken that they don't even match the RESULTS_REGEX won't be
+ # detected.)
+ logging.warning("Bad test output: '%s'" % trace.value.strip())
+ return
+ trace.value, trace.stddev, filedata = self._CalculateStatistics(
+ value_list, trace_name)
+ assert filedata is not None
+ for filename in filedata:
+ self.PrependLog(filename, filedata[filename])
+ elif trace.value.startswith('{'):
+ stripped = trace.value.strip('{},')
+ try:
+ trace.value, trace.stddev = [float(x) for x in stripped.split(',')]
+ except ValueError:
+ logging.warning("Bad test output: '%s'" % trace.value.strip())
+ return
+ else:
+ try:
+ trace.value = float(trace.value)
+ except ValueError:
+ logging.warning("Bad test output: '%s'" % trace.value.strip())
+ return
+
+ graph.traces[trace_name] = trace
+ self._graphs[graph_name] = graph
+
+ # Store values in actual performance.
+ self.TrackActualPerformance(graph=graph_name, trace=trace_name,
+ value=trace.value, stddev=trace.stddev)
+
+ def _ProcessHistogramLine(self, line_match):
+ """Processes a line that matches the HISTOGRAM line format.
+
+ Args:
+ line_match: A MatchObject as returned by re.search.
+ """
+ match_dict = line_match.groupdict()
+ graph_name = match_dict['GRAPH'].strip()
+ trace_name = match_dict['TRACE'].strip()
+ units = (match_dict['UNITS'] or '').strip()
+ histogram_json = match_dict['VALUE_JSON']
+ important = match_dict['IMPORTANT'] or False
+ try:
+ histogram_data = json.loads(histogram_json)
+ except ValueError:
+ # Report, but ignore, corrupted data lines. (Lines that are so badly
+ # broken that they don't even match the HISTOGRAM_REGEX won't be
+ # detected.)
+ logging.warning("Bad test output: '%s'" % histogram_json.strip())
+ return
+
+ # Compute percentile data, create a graph for all percentile values.
+ percentiles = self._CalculatePercentiles(histogram_data, trace_name)
+ for i in percentiles:
+ percentile_graph_name = graph_name + "_" + str(i['percentile'])
+ graph = self._graphs.get(percentile_graph_name, self.Graph())
+ graph.units = units
+ trace = graph.traces.get(trace_name, self.Trace())
+ trace.value = i['value']
+ trace.important = important
+ graph.traces[trace_name] = trace
+ self._graphs[percentile_graph_name] = graph
+ self.TrackActualPerformance(graph=percentile_graph_name,
+ trace=trace_name,
+ value=i['value'])
+
+ # Compute geometric mean and standard deviation.
+ graph = self._graphs.get(graph_name, self.Graph())
+ graph.units = units
+ trace = graph.traces.get(trace_name, self.Trace())
+ trace.value, trace.stddev = self._CalculateHistogramStatistics(
+ histogram_data, trace_name)
+ trace.important = important
+ graph.traces[trace_name] = trace
+ self._graphs[graph_name] = graph
+ self.TrackActualPerformance(graph=graph_name, trace=trace_name,
+ value=trace.value, stddev=trace.stddev)
+
+ # _CalculateStatistics needs to be a member function.
+ # pylint: disable=R0201
+ # Unused argument value_list.
+ # pylint: disable=W0613
+ def _CalculateStatistics(self, value_list, trace_name):
+ """Returns a tuple with some statistics based on the given value list.
+
+ This method may be overridden by subclasses wanting a different standard
+    deviation calculation (or some other sort of error value entirely).
+
+ Args:
+ value_list: the list of values to use in the calculation
+ trace_name: the trace that produced the data (not used in the base
+ implementation, but subclasses may use it)
+
+ Returns:
+ A 3-tuple - mean, standard deviation, and a dict which is either
+ empty or contains information about some file contents.
+ """
+ mean, stddev = chromium_utils.MeanAndStandardDeviation(value_list)
+ return mean, stddev, {}
+
+ def _CalculatePercentiles(self, histogram, trace_name):
+ """Returns a list of percentile values from a histogram.
+
+ This method may be overridden by subclasses.
+
+ Args:
+ histogram: histogram data (relevant keys: "buckets", and for each bucket,
+ "min", "max" and "count").
+ trace_name: the trace that produced the data (not used in the base
+ implementation, but subclasses may use it)
+
+ Returns:
+ A list of dicts, each of which has the keys "percentile" and "value".
+ """
+ return chromium_utils.HistogramPercentiles(histogram, self._percentiles)
+
+ def _CalculateHistogramStatistics(self, histogram, trace_name):
+ """Returns the geometric mean and standard deviation for a histogram.
+
+ This method may be overridden by subclasses.
+
+ Args:
+ histogram: histogram data (relevant keys: "buckets", and for each bucket,
+ "min", "max" and "count").
+ trace_name: the trace that produced the data (not used in the base
+ implementation, but subclasses may use it)
+ """
+ geom_mean, stddev = chromium_utils.GeomMeanAndStdDevFromHistogram(histogram)
+ return geom_mean, stddev
+
+ def _BuildSummaryJson(self, graph):
+ """Returns JSON with the data in the given graph plus revision information.
+
+ Args:
+ graph: A GraphingLogProcessor.Graph object.
+
+ Returns:
+ The format output here is the "-summary.dat line" format; that is, it
+ is a JSON encoding of a dictionary that has the key "traces"
+ """
+ assert self._revision, 'revision must always be present'
+
+ graph_dict = collections.OrderedDict([
+ ('traces', graph.BuildTracesDict()),
+ ('rev', str(self._revision)),
+ ('git_revision', str(self._git_revision)),
+ ('webkit_rev', str(self._webkit_revision)),
+ ('webrtc_rev', str(self._webrtc_revision)),
+ ('v8_rev', str(self._v8_revision)),
+ ('ver', str(self._version)),
+ ('chan', str(self._channel)),
+ ('units', str(graph.units)),
+ ])
+
+ # Include a sorted list of important trace names if there are any.
+ important = [t for t in graph.traces.keys() if graph.traces[t].important]
+ if important:
+ graph_dict['important'] = sorted(important)
+
+ return json.dumps(graph_dict)
+
+ def _FinalizeProcessing(self):
+ self._CreateSummaryOutput()
+ self._GenerateGraphInfo()
+
+ def _CreateSummaryOutput(self):
+ """Writes the summary data file and collect the waterfall display text.
+
+ The summary file contains JSON-encoded data.
+
+ The waterfall contains lines for each important trace, in the form
+ tracename: value< (refvalue)>
+ """
+
+ for graph_name, graph in self._graphs.iteritems():
+ # Write a line in the applicable summary file for each graph.
+ filename = ('%s-summary.dat' % graph_name)
+ data = [self._BuildSummaryJson(graph) + '\n']
+ self._output[filename] = data + self._output.get(filename, [])
+
+ # Add a line to the waterfall for each important trace.
+ for trace_name, trace in graph.traces.iteritems():
+ if trace_name.endswith('_ref'):
+ continue
+ if trace.important:
+ display = '%s: %s' % (trace_name, _FormatHumanReadable(trace.value))
+ if graph.traces.get(trace_name + '_ref'):
+ display += ' (%s)' % _FormatHumanReadable(
+ graph.traces[trace_name + '_ref'].value)
+ self._text_summary.append(display)
+
+ self._text_summary.sort()
+
+ def _GenerateGraphInfo(self):
+ """Outputs a list of graphs viewed this session, for use by the plotter.
+
+ These will be collated and sorted on the master side.
+ """
+ graphs = {}
+ for name, graph in self._graphs.iteritems():
+ graphs[name] = {'name': name,
+ 'important': graph.IsImportant(),
+ 'units': graph.units}
+ self._output[self.GRAPH_LIST] = json.dumps(graphs).split('\n')
+
+ def GetGraphs(self):
+ """Returns a list of graph names."""
+ return self._graphs.keys()
+
+ def GetTraces(self, graph):
+ """Returns a dict of traces associated with the given graph.
+
+ Returns:
+ A dict mapping names of traces to two-element lists of value, stddev.
+ """
+ return self._graphs[graph].BuildTracesDict()
+
+ def GetUnits(self, graph):
+ """Returns the units associated with the given graph."""
+ return str(self._graphs[graph].units)
+
+
+class GraphingPageCyclerLogProcessor(GraphingLogProcessor):
+ """Handles additional processing for page-cycler timing data."""
+
+ _page_list = ['(unknown)']
+ PAGES_REGEXP = re.compile(r'^Pages: \[(?P<LIST>.*)\]')
+
+ def ProcessLine(self, line):
+ """Also looks for the Pages: line to find the page count."""
+ # super() should be used instead of GetParentClass().
+ # pylint: disable=W0212
+ line_match = self.PAGES_REGEXP.search(line)
+ if line_match:
+ self._page_list = line_match.groupdict()['LIST'].strip().split(',')
+ if len(self._page_list) < 1:
+ self._page_list = ['(unknown)']
+ else:
+ chromium_utils.GetParentClass(self).ProcessLine(self, line)
+
+ def _CalculateStatistics(self, value_list, trace_name):
+ """Handles statistics generation and recording for page-cycler data.
+
+ Sums the timings over all pages for each iteration and returns a tuple
+ (mean, standard deviation) of those sums. Also saves a data file
+ <revision>_<tracename>.dat holding a line of times for each URL loaded,
+ for use by humans when debugging a regression.
+ """
+
+ # If the name of the trace is one of the pages in the page list then we are
+ # dealing with the results for that page only, not the overall results. So
+ # calculate the statistics like a normal GraphingLogProcessor, not the
+ # GraphingPageCyclerLogProcessor.
+ if trace_name in self._page_list:
+ return super(GraphingPageCyclerLogProcessor, self)._CalculateStatistics(
+ value_list, trace_name)
+
+ value_count = len(value_list)
+ page_count = len(self._page_list)
+ # Chunk value_list into groups, where each sub-list
+ # has all the page times of one iteration.
+ iterations = [value_list[start:start+page_count]
+ for start in xrange(0, value_count, page_count)]
+
+ iteration_times = map(sum, iterations)
+ page_times_list = map(list, zip(*iterations))
+ page_times_dict = dict(zip(self._page_list, page_times_list))
+
+ pagedata = self._SavePageData(page_times_dict, trace_name)
+ val, stddev = chromium_utils.FilteredMeanAndStandardDeviation(
+ iteration_times)
+ return val, stddev, pagedata
+
+ def _SavePageData(self, page_times, trace_name):
+ """Saves a file holding the timing data for each page loaded.
+
+ Args:
+ page_times: a dict mapping a page URL to a list of its times
+ trace_name: the trace that produced this set of times
+
+ Returns:
+ A dict with one entry, mapping filename to file contents.
+ """
+ file_data = []
+ for page, times in sorted(page_times.iteritems()):
+ mean, stddev = chromium_utils.FilteredMeanAndStandardDeviation(times)
+ file_data.append('%s (%s+/-%s): %s' % (page,
+ _FormatFloat(mean),
+ _FormatFloat(stddev),
+ _JoinWithSpacesAndNewLine(times)))
+
+ filename = '%s_%s.dat' % (self._revision, trace_name)
+ return {filename: file_data}
+
+
+def _FormatFloat(number):
+ """Formats float with two decimal points."""
+ if number:
+ return '%.2f' % number
+ else:
+ return '0.00'
+
+
+def _FormatPercentage(ratio):
+ """Formats a number as a string with a percentage (e.g. 0.5 => "50%")."""
+ return '%s%%' % _FormatFloat(100 * ratio)
+
+
+def _Divide(x, y):
+ """Divides with float division, or returns infinity if denominator is 0."""
+ if y == 0:
+ return float('inf')
+ return float(x) / y
+
+
+def _FormatHumanReadable(number):
+ """Formats a float into three significant figures, using metric suffixes.
+
+ Only m, k, and M prefixes (for 1/1000, 1000, and 1,000,000) are used.
+ Examples:
+ 0.0387 => 38.7m
+ 1.1234 => 1.12
+ 10866 => 10.8k
+ 682851200 => 683M
+ """
+ metric_prefixes = {-3: 'm', 0: '', 3: 'k', 6: 'M'}
+ scientific = '%.2e' % float(number) # 6.83e+005
+ e_idx = scientific.find('e') # 4, or 5 if negative
+ digits = float(scientific[:e_idx]) # 6.83
+ exponent = int(scientific[e_idx + 1:]) # int('+005') = 5
+ while exponent % 3:
+ digits *= 10
+ exponent -= 1
+ while exponent > 6:
+ digits *= 10
+ exponent -= 1
+ while exponent < -3:
+ digits /= 10
+ exponent += 1
+ if digits >= 100:
+ # Don't append a meaningless '.0' to an integer number.
+ digits = int(digits)
+ # Exponent is now divisible by 3, between -3 and 6 inclusive: (-3, 0, 3, 6).
+ return '%s%s' % (digits, metric_prefixes[exponent])
+
+
+def _JoinWithSpacesAndNewLine(words):
+ """Joins a list of words together with spaces."""
+ return ' '.join(str(w) for w in words) + '\n'
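+
+
+# Illustrative usage of GraphingLogProcessor (a sketch; it assumes the
+# site_config 'config' module imported above is available, and 12345 is a
+# made-up revision):
+#
+#   processor = GraphingLogProcessor(revision=12345)
+#   processor.ProcessLine('*RESULT warm_times: t= [250.0,300.0,350.0] ms')
+#   processor.GetTraces('warm_times')   # {'t': ['300.0', <stddev>]}
+#   processor.PerformanceLogs()         # includes 'warm_times-summary.dat'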
diff --git a/infra/scripts/legacy/scripts/slave/results_dashboard.py b/infra/scripts/legacy/scripts/slave/results_dashboard.py
new file mode 100755
index 0000000..9c5b5ab
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/results_dashboard.py
@@ -0,0 +1,393 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions for adding results to perf dashboard."""
+
+import calendar
+import datetime
+import httplib
+import json
+import os
+import urllib
+import urllib2
+
+from slave import slave_utils
+
+# The paths in the results dashboard URLs for sending and viewing results.
+SEND_RESULTS_PATH = '/add_point'
+RESULTS_LINK_PATH = '/report?masters=%s&bots=%s&tests=%s&rev=%s'
+
+# CACHE_DIR/CACHE_FILENAME will be created in options.build_dir to cache
+# results which need to be retried.
+CACHE_DIR = 'results_dashboard'
+CACHE_FILENAME = 'results_to_retry'
+
+
+def SendResults(data, url, build_dir):
+ """Sends results to the Chrome Performance Dashboard.
+
+ This function tries to send the given data to the dashboard, in addition to
+ any data from the cache file. The cache file contains any data that wasn't
+ successfully sent in a previous run.
+
+ Args:
+ data: The data to try to send. Must be JSON-serializable.
+ url: Performance Dashboard URL (including schema).
+    build_dir: Directory in which the cache directory will be created.
+ """
+ results_json = json.dumps(data)
+
+ # Write the new request line to the cache file, which contains all lines
+ # that we shall try to send now.
+ cache_file_name = _GetCacheFileName(build_dir)
+ _AddLineToCacheFile(results_json, cache_file_name)
+
+ # Send all the results from this run and the previous cache to the dashboard.
+ fatal_error, errors = _SendResultsFromCache(cache_file_name, url)
+
+ # Print out a Buildbot link annotation.
+ link_annotation = _LinkAnnotation(url, data)
+ if link_annotation:
+ print link_annotation
+
+ # Print any errors; if there was a fatal error, it should be an exception.
+ for error in errors:
+ print error
+ if fatal_error:
+ print 'Error uploading to dashboard.'
+ print '@@@STEP_EXCEPTION@@@'
+ return False
+ return True
+
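+# Illustrative usage (a sketch; the data point, dashboard URL and build
+# directory are placeholder values):
+#
+#   points = [{'master': 'ChromiumPerf', 'bot': 'linux-release',
+#              'test': 'sunspider/Total', 'revision': 12345, 'value': 953.2}]
+#   SendResults(points, 'https://chromeperf.example.com', '/tmp/build_dir')
+#
+# Lines that fail to send are kept in
+# <build_dir>/results_dashboard/results_to_retry and retried on the next run.
+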
+
+def _GetCacheFileName(build_dir):
+ """Gets the cache filename, creating the file if it does not exist."""
+ cache_dir = os.path.join(os.path.abspath(build_dir), CACHE_DIR)
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+ cache_filename = os.path.join(cache_dir, CACHE_FILENAME)
+ if not os.path.exists(cache_filename):
+ # Create the file.
+ open(cache_filename, 'wb').close()
+ return cache_filename
+
+
+def _AddLineToCacheFile(line, cache_file_name):
+ """Appends a line to the given file."""
+ with open(cache_file_name, 'ab') as cache:
+ cache.write('\n' + line)
+
+
+def _SendResultsFromCache(cache_file_name, url):
+ """Tries to send each line from the cache file in a separate request.
+
+ This also writes data which failed to send back to the cache file.
+
+ Args:
+    cache_file_name: A file name.
+    url: The Performance Dashboard URL to post the data to.
+
+ Returns:
+ A pair (fatal_error, errors), where fatal_error is a boolean indicating
+    whether there was a major error and the step should fail, and errors
+ is a list of error strings.
+ """
+ with open(cache_file_name, 'rb') as cache:
+ cache_lines = cache.readlines()
+ total_results = len(cache_lines)
+
+ fatal_error = False
+ errors = []
+
+ lines_to_retry = []
+ for index, line in enumerate(cache_lines):
+ line = line.strip()
+ if not line:
+ continue
+ print 'Sending result %d of %d to dashboard.' % (index + 1, total_results)
+
+ # Check that the line that was read from the file is valid JSON. If not,
+ # don't try to send it, and don't re-try it later; just print an error.
+ if not _CanParseJSON(line):
+ errors.append('Could not parse JSON: %s' % line)
+ continue
+
+ error = _SendResultsJson(url, line)
+
+ # If the dashboard returned an error, we will re-try next time.
+ if error:
+ if 'HTTPError: 400' in error:
+ # If the remote app rejects the JSON, it's probably malformed,
+ # so we don't want to retry it.
+ print 'Discarding JSON, error:\n%s' % error
+ fatal_error = True
+ break
+
+ if index != len(cache_lines) - 1:
+ # The very last item in the cache_lines list is the new results line.
+ # If this line is not the new results line, then this results line
+ # has already been tried before; now it's considered fatal.
+ fatal_error = True
+
+ # The lines to retry are all lines starting from the current one.
+ lines_to_retry = [l.strip() for l in cache_lines[index:] if l.strip()]
+ errors.append(error)
+ break
+
+ # Write any failing requests to the cache file.
+ cache = open(cache_file_name, 'wb')
+ cache.write('\n'.join(set(lines_to_retry)))
+ cache.close()
+
+ return fatal_error, errors
+
+
+def _CanParseJSON(my_json):
+ """Returns True if the input can be parsed as JSON, False otherwise."""
+ try:
+ json.loads(my_json)
+ except ValueError:
+ return False
+ return True
+
+
+def MakeListOfPoints(charts, bot, test_name, mastername, buildername,
+ buildnumber, supplemental_columns):
+ """Constructs a list of point dictionaries to send.
+
+ The format output by this function is the original format for sending data
+ to the perf dashboard. Each
+
+ Args:
+ charts: A dictionary of chart names to chart data, as generated by the
+ log processor classes (see process_log_utils.GraphingLogProcessor).
+ bot: A string which comes from perf_id, e.g. linux-release.
+ test_name: A test suite name, e.g. sunspider.
+ mastername: Buildbot master name, e.g. chromium.perf.
+ buildername: Builder name (for stdio links).
+ buildnumber: Build number (for stdio links).
+ supplemental_columns: A dictionary of extra data to send with a point.
+
+ Returns:
+ A list of dictionaries in the format accepted by the perf dashboard.
+ Each dictionary has the keys "master", "bot", "test", "value", "revision".
+ The full details of this format are described at http://goo.gl/TcJliv.
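+
+    A single point might look like this (hypothetical values):
+      {"master": "ChromiumPerf", "bot": "linux-release",
+       "test": "sunspider/Total", "revision": 456789, "value": 845.0,
+       "error": 12.5, "supplemental_columns": {"r_commit_pos": 456789}, ...}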
+ """
+ results = []
+
+ # The master name used for the dashboard is the CamelCase name returned by
+ # GetActiveMaster(), and not the canonical master name with dots.
+ master = slave_utils.GetActiveMaster()
+
+ for chart_name, chart_data in sorted(charts.items()):
+ point_id, revision_columns = _RevisionNumberColumns(chart_data, prefix='r_')
+
+ for trace_name, trace_values in sorted(chart_data['traces'].items()):
+ is_important = trace_name in chart_data.get('important', [])
+ test_path = _TestPath(test_name, chart_name, trace_name)
+ result = {
+ 'master': master,
+ 'bot': bot,
+ 'test': test_path,
+ 'revision': point_id,
+ 'masterid': mastername,
+ 'buildername': buildername,
+ 'buildnumber': buildnumber,
+ 'supplemental_columns': {}
+ }
+
+ # Add the supplemental_columns values that were passed in after the
+ # calculated revision column values so that these can be overwritten.
+ result['supplemental_columns'].update(revision_columns)
+ result['supplemental_columns'].update(supplemental_columns)
+
+ result['value'] = trace_values[0]
+ result['error'] = trace_values[1]
+
+ # Add other properties to this result dictionary if available.
+ if chart_data.get('units'):
+ result['units'] = chart_data['units']
+ if is_important:
+ result['important'] = True
+
+ results.append(result)
+
+ return results
+
+
+def MakeDashboardJsonV1(chart_json, revision_data, bot, mastername,
+ buildername, buildnumber, supplemental_dict, is_ref):
+ """Generates Dashboard JSON in the new Telemetry format.
+
+ See http://goo.gl/mDZHPl for more info on the format.
+
+ Args:
+    chart_json: A dict containing the telemetry output.
+ revision_data: Data about revisions to include in the upload.
+ bot: A string which comes from perf_id, e.g. linux-release.
+ mastername: Buildbot master name, e.g. chromium.perf.
+ buildername: Builder name (for stdio links).
+ buildnumber: Build number (for stdio links).
+    supplemental_dict: A dictionary of extra data to send with a point; keys
+        are expected to be prefixed with 'a_', and the prefix is stripped.
+ is_ref: True if this is a reference build, False otherwise.
+
+ Returns:
+ A dictionary in the format accepted by the perf dashboard.
+ """
+ if not chart_json:
+ print 'Error: No json output from telemetry.'
+ print '@@@STEP_FAILURE@@@'
+ # The master name used for the dashboard is the CamelCase name returned by
+ # GetActiveMaster(), and not the canonical master name with dots.
+ master = slave_utils.GetActiveMaster()
+ point_id, revision_columns = _RevisionNumberColumns(revision_data, prefix='')
+ supplemental_columns = {}
+ for key in supplemental_dict:
+ supplemental_columns[key.replace('a_', '', 1)] = supplemental_dict[key]
+ fields = {
+ 'master': master,
+ 'bot': bot,
+ 'masterid': mastername,
+ 'buildername': buildername,
+ 'buildnumber': buildnumber,
+ 'point_id': point_id,
+ 'supplemental': supplemental_columns,
+ 'versions': revision_columns,
+ 'chart_data': chart_json,
+ 'is_ref': is_ref,
+ }
+ return fields
+
+
+def _GetTimestamp():
+ """Get the Unix timestamp for the current time."""
+ return int(calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
+
+
+def _RevisionNumberColumns(data, prefix):
+ """Get the revision number and revision-related columns from the given data.
+
+ Args:
+ data: A dict of information from one line of the log file.
+ prefix: Prefix for revision type keys. 'r_' for non-telemetry json, '' for
+ telemetry json.
+
+ Returns:
+ A tuple with the point id (which must be an int), and a dict of
+ revision-related columns.
+ """
+ revision_supplemental_columns = {}
+
+ # The dashboard requires points' x-values to be integers, and points are
+ # ordered by these x-values. If data['rev'] can't be parsed as an int, assume
+ # that it's a git commit hash and use timestamp as the x-value.
+ try:
+ revision = int(data['rev'])
+ if revision and revision > 300000 and revision < 1000000:
+ # Revision is the commit pos.
+ # TODO(sullivan,qyearsley): use got_revision_cp when available.
+ revision_supplemental_columns[prefix + 'commit_pos'] = revision
+ except ValueError:
+ # The dashboard requires ordered integer revision numbers. If the revision
+ # is not an integer, assume it's a git hash and send a timestamp.
+ revision = _GetTimestamp()
+ revision_supplemental_columns[prefix + 'chromium'] = data['rev']
+
+ # For other revision data, add it if it's present and not undefined:
+ for key in ['webkit_rev', 'webrtc_rev', 'v8_rev']:
+ if key in data and data[key] != 'undefined':
+ revision_supplemental_columns[prefix + key] = data[key]
+
+ # If possible, also send the git hash.
+ if 'git_revision' in data and data['git_revision'] != 'undefined':
+ revision_supplemental_columns[prefix + 'chromium'] = data['git_revision']
+
+ return revision, revision_supplemental_columns
+
+
+def _TestPath(test_name, chart_name, trace_name):
+ """Get the slash-separated test path to send.
+
+ Args:
+    test_name: Test name. Typically, this will be a top-level 'test suite'
+        name.
+ chart_name: Name of a chart where multiple trace lines are grouped. If the
+ chart name is the same as the trace name, that signifies that this is
+ the main trace for the chart.
+    trace_name: The "trace name" is the name of an individual line on a
+        chart.
+
+ Returns:
+ A slash-separated list of names that corresponds to the hierarchy of test
+ data in the Chrome Performance Dashboard; doesn't include master or bot
+ name.
+ """
+  # For tests run on reference builds by build/scripts/slave/telemetry.py,
+ # "_ref" is appended to the trace name. On the dashboard, as long as the
+ # result is on the right chart, it can just be called "ref".
+ if trace_name == chart_name + '_ref':
+ trace_name = 'ref'
+ chart_name = chart_name.replace('_by_url', '')
+
+ # No slashes are allowed in the trace name.
+ trace_name = trace_name.replace('/', '_')
+
+ # The results for "test/chart" and "test/chart/*" will all be shown on the
+  # same chart by the dashboard. The result with path "test/chart" is considered
+ # the main trace for the chart.
+ test_path = '%s/%s/%s' % (test_name, chart_name, trace_name)
+ if chart_name == trace_name:
+ test_path = '%s/%s' % (test_name, chart_name)
+ return test_path
+
+
+def _SendResultsJson(url, results_json):
+ """Make a HTTP POST with the given JSON to the Performance Dashboard.
+
+ Args:
+ url: URL of Performance Dashboard instance, e.g.
+ "https://chromeperf.appspot.com".
+ results_json: JSON string that contains the data to be sent.
+
+ Returns:
+ None if successful, or an error string if there were errors.
+ """
+ # When data is provided to urllib2.Request, a POST is sent instead of GET.
+ # The data must be in the application/x-www-form-urlencoded format.
+ data = urllib.urlencode({'data': results_json})
+ req = urllib2.Request(url + SEND_RESULTS_PATH, data)
+ try:
+ urllib2.urlopen(req)
+ except urllib2.HTTPError as e:
+    return ('HTTPError: %d. Response: %s\n'
+ 'JSON: %s\n' % (e.code, e.read(), results_json))
+ except urllib2.URLError as e:
+ return 'URLError: %s for JSON %s\n' % (str(e.reason), results_json)
+ except httplib.HTTPException as e:
+ return 'HTTPException for JSON %s\n' % results_json
+ return None
+
+
+def _LinkAnnotation(url, data):
+ """Prints a link annotation with a link to the dashboard if possible.
+
+ Args:
+ url: The Performance Dashboard URL, e.g. "https://chromeperf.appspot.com"
+ data: The data that's being sent to the dashboard.
+
+ Returns:
+ An annotation to print, or None.
+ """
+ if not data:
+ return None
+ if isinstance(data, list):
+ master, bot, test, revision = (
+ data[0]['master'], data[0]['bot'], data[0]['test'], data[0]['revision'])
+ else:
+ master, bot, test, revision = (
+ data['master'], data['bot'], data['chart_data']['benchmark_name'],
+ data['point_id'])
+ results_link = url + RESULTS_LINK_PATH % (
+ urllib.quote(master), urllib.quote(bot), urllib.quote(test.split('/')[0]),
+ revision)
+ return '@@@STEP_LINK@%s@%s@@@' % ('Results Dashboard', results_link)
diff --git a/infra/scripts/legacy/scripts/slave/runisolatedtest.py b/infra/scripts/legacy/scripts/slave/runisolatedtest.py
new file mode 100755
index 0000000..e9b0fb0
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/runisolatedtest.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A tool to run a chrome test executable directly, or in isolated mode.
+
+TODO(maruel): This script technically needs to die and be replaced by running
+all the tests always isolated even when not run on Swarming. This will take a
+while.
+"""
+
+import logging
+import optparse
+import os
+import subprocess
+import sys
+
+
+USAGE = ('%s [options] /full/path/to/test.exe -- [original test command]' %
+ os.path.basename(sys.argv[0]))
+
+LINUX_ISOLATE_ENABLED_TESTS = set((
+ 'base_unittests',
+ 'browser_tests',
+ 'interactive_ui_tests',
+ 'net_unittests',
+ 'unit_tests',
+))
+
+# TODO(maruel): Not enabled because of lack of XCode support and missing
+# dependencies for more complex tests.
+MAC_ISOLATE_ENABLED_TESTS = set()
+
+WIN_ISOLATE_ENABLED_TESTS = set((
+ 'base_unittests',
+ 'browser_tests',
+ 'interactive_ui_tests',
+ 'net_unittests',
+ 'unit_tests',
+))
+
+# http://crbug.com/260311
+# They are missing files for an unknown reason.
+BUG_260311 = set((
+ 'browser_tests',
+ 'interactive_ui_tests',
+))
+
+ISOLATE_ENABLED_BUILDERS = {
+ # CI linux
+ 'Linux Tests': LINUX_ISOLATE_ENABLED_TESTS,
+ # CI mac
+ 'Mac10.6 Tests (1)': MAC_ISOLATE_ENABLED_TESTS,
+ 'Mac10.7 Tests (1)': MAC_ISOLATE_ENABLED_TESTS,
+ # CI win
+ 'Vista Tests (1)': WIN_ISOLATE_ENABLED_TESTS - BUG_260311,
+ 'Vista Tests (2)': WIN_ISOLATE_ENABLED_TESTS - BUG_260311,
+ 'Vista Tests (3)': WIN_ISOLATE_ENABLED_TESTS - BUG_260311,
+ 'Win7 Tests (1)': WIN_ISOLATE_ENABLED_TESTS,
+ 'Win7 Tests (2)': WIN_ISOLATE_ENABLED_TESTS,
+ 'Win7 Tests (3)': WIN_ISOLATE_ENABLED_TESTS,
+ 'XP Tests (1)': WIN_ISOLATE_ENABLED_TESTS - BUG_260311,
+ 'XP Tests (2)': WIN_ISOLATE_ENABLED_TESTS - BUG_260311,
+ 'XP Tests (3)': WIN_ISOLATE_ENABLED_TESTS - BUG_260311,
+
+ # Try Server
+ 'linux_rel': LINUX_ISOLATE_ENABLED_TESTS,
+ 'mac_rel': MAC_ISOLATE_ENABLED_TESTS,
+ 'win_rel': WIN_ISOLATE_ENABLED_TESTS,
+}
+
+
+def should_run_as_isolated(builder_name, test_name):
+ logging.info('should_run_as_isolated(%s, %s)' % (builder_name, test_name))
+ return test_name in ISOLATE_ENABLED_BUILDERS.get(builder_name, [])
+
+
+def run_command(command):
+ """Inspired from chromium_utils.py's RunCommand()."""
+ print '\n' + subprocess.list2cmdline(command)
+ sys.stdout.flush()
+ sys.stderr.flush()
+ return subprocess.call(command)
+
+
+def run_test_isolated(isolate_script, test_exe, original_command):
+ """Runs the test under isolate.py run.
+
+ It compensates for discrepancies between sharding_supervisor.py arguments and
+ run_test_cases.py arguments.
+
+ The isolated file must be alongside the test executable, with the same
+ name and the .isolated extension.
+ """
+ isolated_file = os.path.splitext(test_exe)[0] + '.isolated'
+
+ if not os.path.exists(isolated_file):
+ logging.error('No isolate file %s', isolated_file)
+ return 1
+
+ isolate_command = [sys.executable, isolate_script,
+ 'run', '--isolated', isolated_file,
+ # Print info log lines, so isolate.py prints the path to
+ # the binary it's about to run, http://crbug.com/311625
+ '-v']
+
+ # Start setting the test specific options.
+ isolate_command.append('--')
+ isolate_command.append('--no-cr')
+ original_command = original_command[:]
+ while original_command:
+ item = original_command.pop(0)
+ if item == '--total-slave':
+ isolate_command.extend(['--shards', original_command.pop(0)])
+ elif item == '--slave-index':
+ isolate_command.extend(['--index', original_command.pop(0)])
+ elif item.startswith(('--gtest_filter',
+ '--gtest_output',
+ '--test-launcher')):
+ isolate_command.append(item)
+
+ return run_command(isolate_command)
+
+
+def main(argv):
+ option_parser = optparse.OptionParser(USAGE)
+ option_parser.add_option('--test_name', default='',
+ help='The name of the test')
+ option_parser.add_option('--builder_name', default='',
+                           help='The name of the builder that created this '
+                                'test')
+ option_parser.add_option('--checkout_dir',
+ help='Checkout directory, used to locate the '
+ 'swarm_client scripts.')
+ option_parser.add_option('-f', '--force-isolated', action='store_true',
+ help='Force test to run isolated. By default only '
+                           'whitelisted builders and tests are run isolated.')
+ option_parser.add_option('-v', '--verbose', action='count', default=0,
+ help='Use to increase log verbosity. Can be passed '
+ 'in multiple times for more detailed logs.')
+
+ options, args = option_parser.parse_args(argv)
+
+ test_exe = args[0]
+ original_command = args[1:]
+
+ # Initialize logging.
+ level = [logging.ERROR, logging.INFO, logging.DEBUG][min(2, options.verbose)]
+ logging.basicConfig(level=level,
+ format='%(asctime)s %(filename)s:%(lineno)-3d'
+ ' %(levelname)s %(message)s',
+ datefmt='%y%m%d %H:%M:%S')
+
+ if (options.force_isolated or
+ should_run_as_isolated(options.builder_name, options.test_name)):
+ logging.info('Running test in isolate mode')
+ # Search first in swarming_client
+ isolate_script = os.path.join(options.checkout_dir, 'src', 'tools',
+ 'swarming_client', 'isolate.py')
+
+ return run_test_isolated(isolate_script, test_exe, original_command)
+ else:
+ logging.info('Running test normally')
+ return run_command(original_command)
+
+
+if '__main__' == __name__:
+ sys.exit(main(None))
diff --git a/infra/scripts/legacy/scripts/slave/runtest.py b/infra/scripts/legacy/scripts/slave/runtest.py
new file mode 100755
index 0000000..75c3366
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/runtest.py
@@ -0,0 +1,1948 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A tool used to run a Chrome test executable and process the output.
+
+This script is used by the buildbot slaves. It must be run from the outer
+build directory, e.g. chrome-release/build/.
+
+For a list of command-line options, call this script with '--help'.
+"""
+
+import ast
+import copy
+import datetime
+import exceptions
+import gzip
+import hashlib
+import json
+import logging
+import optparse
+import os
+import re
+import stat
+import subprocess
+import sys
+import tempfile
+
+# The following note was added in 2010 by nsylvain:
+#
+# sys.path needs to be modified here because python2.6 automatically adds the
+# system "google" module (/usr/lib/pymodules/python2.6/google) to sys.modules
+# when we import "chromium_config" (I don't know why it does this). This causes
+# the import of our local "google.*" modules to fail because python seems to
+# only look for a system "google.*", even if our path is in sys.path before
+# importing "google.*". If we modify sys.path here, before importing
+# "chromium_config", python2.6 properly uses our path to find our "google.*"
+# (even though it still automatically adds the system "google" module to
+# sys.modules, and probably should still be using that to resolve "google.*",
+# which I really don't understand).
+sys.path.insert(0, os.path.abspath('src/tools/python'))
+
+from common import chromium_utils
+from common import gtest_utils
+
+# TODO(crbug.com/403564). We almost certainly shouldn't be importing this.
+import config
+
+from slave import annotation_utils
+from slave import build_directory
+from slave import crash_utils
+from slave import gtest_slave_utils
+from slave import performance_log_processor
+from slave import results_dashboard
+from slave import slave_utils
+from slave import telemetry_utils
+from slave import xvfb
+
+USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0])
+
+CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
+
+# Directory to write JSON for test results into.
+DEST_DIR = 'gtest_results'
+
+# Names of httpd configuration file under different platforms.
+HTTPD_CONF = {
+ 'linux': 'httpd2_linux.conf',
+ 'mac': 'httpd2_mac.conf',
+ 'win': 'httpd.conf'
+}
+# Regex matching git comment lines containing svn revision info.
+GIT_SVN_ID_RE = re.compile(r'^git-svn-id: .*@([0-9]+) .*$')
+# Regex for the master branch commit position.
+GIT_CR_POS_RE = re.compile(r'^Cr-Commit-Position: refs/heads/master@{#(\d+)}$')
+
+# The directory that this script is in.
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+
+LOG_PROCESSOR_CLASSES = {
+ 'gtest': gtest_utils.GTestLogParser,
+ 'graphing': performance_log_processor.GraphingLogProcessor,
+ 'pagecycler': performance_log_processor.GraphingPageCyclerLogProcessor,
+}
+
+
+def _GetTempCount():
+ """Returns the number of files and directories inside the temporary dir."""
+ return len(os.listdir(tempfile.gettempdir()))
+
+
+def _LaunchDBus():
+ """Launches DBus to work around a bug in GLib.
+
+ Works around a bug in GLib where it performs operations which aren't
+ async-signal-safe (in particular, memory allocations) between fork and exec
+ when it spawns subprocesses. This causes threads inside Chrome's browser and
+ utility processes to get stuck, and this harness to hang waiting for those
+ processes, which will never terminate. This doesn't happen on users'
+ machines, because they have an active desktop session and the
+ DBUS_SESSION_BUS_ADDRESS environment variable set, but it does happen on the
+ bots. See crbug.com/309093 for more details.
+
+ Returns:
+ True if it actually spawned DBus.
+ """
+ import platform
+ if (platform.uname()[0].lower() == 'linux' and
+ 'DBUS_SESSION_BUS_ADDRESS' not in os.environ):
+ try:
+ print 'DBUS_SESSION_BUS_ADDRESS env var not found, starting dbus-launch'
+ dbus_output = subprocess.check_output(['dbus-launch']).split('\n')
+ for line in dbus_output:
+ m = re.match(r'([^=]+)\=(.+)', line)
+ if m:
+ os.environ[m.group(1)] = m.group(2)
+ print ' setting %s to %s' % (m.group(1), m.group(2))
+ return True
+ except (subprocess.CalledProcessError, OSError) as e:
+ print 'Exception while running dbus_launch: %s' % e
+ return False
+
+
+def _ShutdownDBus():
+ """Manually kills the previously-launched DBus daemon.
+
+ It appears that passing --exit-with-session to dbus-launch in
+ _LaunchDBus(), above, doesn't cause the launched dbus-daemon to shut
+ down properly. Manually kill the sub-process using the PID it gave
+ us at launch time.
+
+ This function is called when the flag --spawn-dbus is given, and if
+ _LaunchDBus(), above, actually spawned the dbus-daemon.
+ """
+ import signal
+ if 'DBUS_SESSION_BUS_PID' in os.environ:
+ dbus_pid = os.environ['DBUS_SESSION_BUS_PID']
+ try:
+ os.kill(int(dbus_pid), signal.SIGTERM)
+ print ' killed dbus-daemon with PID %s' % dbus_pid
+ except OSError as e:
+ print ' error killing dbus-daemon with PID %s: %s' % (dbus_pid, e)
+ # Try to clean up any stray DBUS_SESSION_BUS_ADDRESS environment
+ # variable too. Some of the bots seem to re-invoke runtest.py in a
+ # way that this variable sticks around from run to run.
+ if 'DBUS_SESSION_BUS_ADDRESS' in os.environ:
+ del os.environ['DBUS_SESSION_BUS_ADDRESS']
+ print ' cleared DBUS_SESSION_BUS_ADDRESS environment variable'
+
+
+def _RunGTestCommand(
+ options, command, extra_env, log_processor=None, pipes=None):
+ """Runs a test, printing and possibly processing the output.
+
+ Args:
+ options: Options passed for this invocation of runtest.py.
+ command: A list of strings in a command (the command and its arguments).
+ extra_env: A dictionary of extra environment variables to set.
+ log_processor: A log processor instance which has the ProcessLine method.
+ pipes: A list of command string lists which the output will be piped to.
+
+ Returns:
+ The process return code.
+ """
+ env = os.environ.copy()
+ if extra_env:
+ print 'Additional test environment:'
+ for k, v in sorted(extra_env.items()):
+ print ' %s=%s' % (k, v)
+ env.update(extra_env or {})
+
+ # Trigger bot mode (test retries, redirection of stdio, possibly faster,
+ # etc.) - using an environment variable instead of command-line flags because
+ # some internal waterfalls run this (_RunGTestCommand) for totally non-gtest
+ # code.
+ # TODO(phajdan.jr): Clean this up when internal waterfalls are fixed.
+ env.update({'CHROMIUM_TEST_LAUNCHER_BOT_MODE': '1'})
+
+ log_processors = {}
+ if log_processor:
+ log_processors[log_processor.__class__.__name__] = log_processor
+
+ if (not 'GTestLogParser' in log_processors and
+ options.log_processor_output_file):
+ log_processors['GTestLogParser'] = gtest_utils.GTestLogParser()
+
+ def _ProcessLine(line):
+ for current_log_processor in log_processors.values():
+ current_log_processor.ProcessLine(line)
+
+ result = chromium_utils.RunCommand(
+ command, pipes=pipes, parser_func=_ProcessLine, env=env)
+
+ if options.log_processor_output_file:
+ _WriteLogProcessorResultsToOutput(
+ log_processors['GTestLogParser'], options.log_processor_output_file)
+
+ return result
+
+
+def _GetMaster():
+ """Return the master name for the current host."""
+ return chromium_utils.GetActiveMaster()
+
+
+def _GetMasterString(master):
+ """Returns a message describing what the master is."""
+ return '[Running for master: "%s"]' % master
+
+
+def _GetGitCommitPositionFromLog(log):
+ """Returns either the commit position or svn rev from a git log."""
+ # Parse from the bottom up, in case the commit message embeds the message
+ # from a different commit (e.g., for a revert).
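+  # A line like 'Cr-Commit-Position: refs/heads/master@{#123456}' or
+  # 'git-svn-id: svn://some/path@123456 <uuid>' yields '123456'
+  # (hypothetical values).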
+ for r in [GIT_CR_POS_RE, GIT_SVN_ID_RE]:
+ for line in reversed(log.splitlines()):
+ m = r.match(line.strip())
+ if m:
+ return m.group(1)
+ return None
+
+
+def _GetGitCommitPosition(dir_path):
+ """Extracts the commit position or svn revision number of the HEAD commit."""
+ git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
+ p = subprocess.Popen(
+ [git_exe, 'log', '-n', '1', '--pretty=format:%B', 'HEAD'],
+ cwd=dir_path, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ (log, _) = p.communicate()
+ if p.returncode != 0:
+ return None
+ return _GetGitCommitPositionFromLog(log)
+
+
+def _IsGitDirectory(dir_path):
+ """Checks whether the given directory is in a git repository.
+
+ Args:
+ dir_path: The directory path to be tested.
+
+ Returns:
+ True if given directory is in a git repository, False otherwise.
+ """
+ git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
+ with open(os.devnull, 'w') as devnull:
+ p = subprocess.Popen([git_exe, 'rev-parse', '--git-dir'],
+ cwd=dir_path, stdout=devnull, stderr=devnull)
+ return p.wait() == 0
+
+
+def _GetRevision(in_directory):
+ """Returns the SVN revision, git commit position, or git hash.
+
+ Args:
+ in_directory: A directory in the repository to be checked.
+
+ Returns:
+    An SVN revision as a string if the given directory is in an SVN repository,
+ or a git commit position number, or if that's not available, a git hash.
+ If all of that fails, an empty string is returned.
+ """
+ import xml.dom.minidom
+ if not os.path.exists(os.path.join(in_directory, '.svn')):
+ if _IsGitDirectory(in_directory):
+ svn_rev = _GetGitCommitPosition(in_directory)
+ if svn_rev:
+ return svn_rev
+ return _GetGitRevision(in_directory)
+ else:
+ return ''
+
+ # Note: Not thread safe: http://bugs.python.org/issue2320
+ output = subprocess.Popen(['svn', 'info', '--xml'],
+ cwd=in_directory,
+ shell=(sys.platform == 'win32'),
+ stdout=subprocess.PIPE).communicate()[0]
+ try:
+ dom = xml.dom.minidom.parseString(output)
+ return dom.getElementsByTagName('entry')[0].getAttribute('revision')
+ except xml.parsers.expat.ExpatError:
+ return ''
+ return ''
+
+
+def _GetGitRevision(in_directory):
+ """Returns the git hash tag for the given directory.
+
+ Args:
+ in_directory: The directory where git is to be run.
+
+ Returns:
+ The git SHA1 hash string.
+ """
+ git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
+ p = subprocess.Popen(
+ [git_exe, 'rev-parse', 'HEAD'],
+ cwd=in_directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ (stdout, _) = p.communicate()
+ return stdout.strip()
+
+
+def _GenerateJSONForTestResults(options, log_processor):
+ """Generates or updates a JSON file from the gtest results XML and upload the
+ file to the archive server.
+
+ The archived JSON file will be placed at:
+ www-dir/DEST_DIR/buildname/testname/results.json
+ on the archive server. NOTE: This will be deprecated.
+
+ Args:
+ options: command-line options that are supposed to have build_dir,
+ results_directory, builder_name, build_name and test_output_xml values.
+ log_processor: An instance of PerformanceLogProcessor or similar class.
+
+ Returns:
+ True upon success, False upon failure.
+ """
+ results_map = None
+ try:
+ if (os.path.exists(options.test_output_xml) and
+ not _UsingGtestJson(options)):
+ results_map = gtest_slave_utils.GetResultsMapFromXML(
+ options.test_output_xml)
+ else:
+ if _UsingGtestJson(options):
+ sys.stderr.write('using JSON summary output instead of gtest XML\n')
+ else:
+ sys.stderr.write(
+ ('"%s" \\ "%s" doesn\'t exist: Unable to generate JSON from XML, '
+ 'using log output.\n') % (os.getcwd(), options.test_output_xml))
+ # The file did not get generated. See if we can generate a results map
+ # from the log output.
+ results_map = gtest_slave_utils.GetResultsMap(log_processor)
+ except Exception as e:
+ # This error will be caught by the following 'not results_map' statement.
+ print 'Error: ', e
+
+ if not results_map:
+ print 'No data was available to update the JSON results'
+ # Consider this non-fatal.
+ return True
+
+ build_dir = os.path.abspath(options.build_dir)
+ slave_name = options.builder_name or slave_utils.SlaveBuildName(build_dir)
+
+ generate_json_options = copy.copy(options)
+ generate_json_options.build_name = slave_name
+ generate_json_options.input_results_xml = options.test_output_xml
+ generate_json_options.builder_base_url = '%s/%s/%s/%s' % (
+ config.Master.archive_url, DEST_DIR, slave_name, options.test_type)
+ generate_json_options.master_name = options.master_class_name or _GetMaster()
+ generate_json_options.test_results_server = config.Master.test_results_server
+
+ print _GetMasterString(generate_json_options.master_name)
+
+ generator = None
+
+ try:
+ if options.revision:
+ generate_json_options.chrome_revision = options.revision
+ else:
+ chrome_dir = chromium_utils.FindUpwardParent(build_dir, 'third_party')
+ generate_json_options.chrome_revision = _GetRevision(chrome_dir)
+
+ if options.webkit_revision:
+ generate_json_options.webkit_revision = options.webkit_revision
+ else:
+ webkit_dir = chromium_utils.FindUpward(
+ build_dir, 'third_party', 'WebKit', 'Source')
+ generate_json_options.webkit_revision = _GetRevision(webkit_dir)
+
+ # Generate results JSON file and upload it to the appspot server.
+ generator = gtest_slave_utils.GenerateJSONResults(
+ results_map, generate_json_options)
+
+ except Exception as e:
+ print 'Unexpected error while generating JSON: %s' % e
+ sys.excepthook(*sys.exc_info())
+ return False
+
+ # The code can throw all sorts of exceptions, including
+ # slave.gtest.networktransaction.NetworkTimeout so just trap everything.
+ # Earlier versions of this code ignored network errors, so until a
+ # retry mechanism is added, continue to do so rather than reporting
+ # an error.
+ try:
+ # Upload results JSON file to the appspot server.
+ gtest_slave_utils.UploadJSONResults(generator)
+ except Exception as e:
+ # Consider this non-fatal for the moment.
+ print 'Unexpected error while uploading JSON: %s' % e
+ sys.excepthook(*sys.exc_info())
+
+ return True
+
+
+def _BuildTestBinaryCommand(_build_dir, test_exe_path, options):
+ """Builds a command to run a test binary.
+
+ Args:
+ build_dir: Path to the tools/build directory.
+ test_exe_path: Path to test command binary.
+ options: Options passed for this invocation of runtest.py.
+
+ Returns:
+ A command, represented as a list of command parts.
+ """
+ command = [
+ test_exe_path,
+ ]
+
+ if options.annotate == 'gtest':
+ command.append('--test-launcher-bot-mode')
+
+ if options.total_shards and options.shard_index:
+ command.extend([
+ '--test-launcher-total-shards=%d' % options.total_shards,
+ '--test-launcher-shard-index=%d' % (options.shard_index - 1)])
+
+ return command
+
+
+def _UsingGtestJson(options):
+ """Returns True if we're using GTest JSON summary."""
+ return (options.annotate == 'gtest' and
+ not options.run_python_script and
+ not options.run_shell_script)
+
+
+def _ListLogProcessors(selection):
+ """Prints a list of available log processor classes iff the input is 'list'.
+
+ Args:
+ selection: A log processor name, or the string "list".
+
+ Returns:
+ True if a list was printed, False otherwise.
+ """
+ shouldlist = selection and selection == 'list'
+ if shouldlist:
+ print
+ print 'Available log processors:'
+ for p in LOG_PROCESSOR_CLASSES:
+ print ' ', p, LOG_PROCESSOR_CLASSES[p].__name__
+
+ return shouldlist
+
+
+def _SelectLogProcessor(options, is_telemetry):
+ """Returns a log processor class based on the command line options.
+
+ Args:
+ options: Command-line options (from OptionParser).
+ is_telemetry: bool for whether to create a telemetry log processor.
+
+ Returns:
+ A log processor class, or None.
+ """
+ if _UsingGtestJson(options):
+ return gtest_utils.GTestJSONParser
+
+ if is_telemetry:
+ return telemetry_utils.TelemetryResultsProcessor
+
+ if options.annotate:
+ if options.annotate in LOG_PROCESSOR_CLASSES:
+ if options.generate_json_file and options.annotate != 'gtest':
+        raise NotImplementedError('"%s" doesn\'t make sense with '
+                                  'options.generate_json_file.' %
+                                  options.annotate)
+ else:
+ return LOG_PROCESSOR_CLASSES[options.annotate]
+ else:
+ raise KeyError('"%s" is not a valid GTest parser!' % options.annotate)
+ elif options.generate_json_file:
+ return LOG_PROCESSOR_CLASSES['gtest']
+
+ return None
+
+
+def _GetCommitPos(build_properties):
+ """Extracts the commit position from the build properties, if its there."""
+ if 'got_revision_cp' not in build_properties:
+ return None
+ commit_pos = build_properties['got_revision_cp']
+ return int(re.search(r'{#(\d+)}', commit_pos).group(1))
+
+
+def _GetMainRevision(options):
+ """Return revision to use as the numerical x-value in the perf dashboard.
+
+ This will be used as the value of "rev" in the data passed to
+ results_dashboard.SendResults.
+
+  In order of priority, this function could return:
+ 1. The value of the --revision flag (IF it can be parsed as an int).
+ 2. The value of "got_revision_cp" in build properties.
+ 3. An SVN number, git commit position, or git commit hash.
+ """
+ if options.revision and options.revision.isdigit():
+ return options.revision
+ commit_pos_num = _GetCommitPos(options.build_properties)
+ if commit_pos_num is not None:
+ return commit_pos_num
+ # TODO(sullivan,qyearsley): Don't fall back to _GetRevision if it returns
+ # a git commit, since this should be a numerical revision. Instead, abort
+ # and fail.
+ return _GetRevision(os.path.dirname(os.path.abspath(options.build_dir)))
+
+
+def _GetBlinkRevision(options):
+ if options.webkit_revision:
+ webkit_revision = options.webkit_revision
+ else:
+ try:
+ webkit_dir = chromium_utils.FindUpward(
+ os.path.abspath(options.build_dir), 'third_party', 'WebKit', 'Source')
+ webkit_revision = _GetRevision(webkit_dir)
+ except Exception:
+ webkit_revision = None
+ return webkit_revision
+
+
+def _GetTelemetryRevisions(options):
+ """Fills in the same revisions fields that process_log_utils does."""
+
+ versions = {}
+ versions['rev'] = _GetMainRevision(options)
+ versions['webkit_rev'] = _GetBlinkRevision(options)
+ versions['webrtc_rev'] = options.build_properties.get('got_webrtc_revision')
+ versions['v8_rev'] = options.build_properties.get('got_v8_revision')
+ versions['ver'] = options.build_properties.get('version')
+ versions['git_revision'] = options.build_properties.get('git_revision')
+ # There are a lot of "bad" revisions to check for, so clean them all up here.
+ for key in versions.keys():
+ if not versions[key] or versions[key] == 'undefined':
+ del versions[key]
+ return versions
+
+
+def _CreateLogProcessor(log_processor_class, options, telemetry_info):
+ """Creates a log processor instance.
+
+ Args:
+ log_processor_class: A subclass of PerformanceLogProcessor or similar class.
+ options: Command-line options (from OptionParser).
+ telemetry_info: dict of info for run_benchmark runs.
+
+ Returns:
+ An instance of a log processor class, or None.
+ """
+ if not log_processor_class:
+ return None
+
+ if log_processor_class.__name__ == 'TelemetryResultsProcessor':
+ tracker_obj = log_processor_class(
+ telemetry_info['filename'],
+ telemetry_info['is_ref'],
+ telemetry_info['cleanup_dir'])
+ elif log_processor_class.__name__ == 'GTestLogParser':
+ tracker_obj = log_processor_class()
+ elif log_processor_class.__name__ == 'GTestJSONParser':
+ tracker_obj = log_processor_class(
+ options.build_properties.get('mastername'))
+ else:
+ webkit_revision = _GetBlinkRevision(options) or 'undefined'
+ revision = _GetMainRevision(options) or 'undefined'
+
+ tracker_obj = log_processor_class(
+ revision=revision,
+ build_properties=options.build_properties,
+ factory_properties=options.factory_properties,
+ webkit_revision=webkit_revision)
+
+ if options.annotate and options.generate_json_file:
+ tracker_obj.ProcessLine(_GetMasterString(_GetMaster()))
+
+ return tracker_obj
+
+
+def _GetSupplementalColumns(build_dir, supplemental_columns_file_name):
+ """Reads supplemental columns data from a file.
+
+ Args:
+ build_dir: Build dir name.
+ supplemental_columns_file_name: Name of a file which contains the
+ supplemental columns data (in JSON format).
+
+ Returns:
+ A dict of supplemental data to send to the dashboard.
+ """
+ supplemental_columns = {}
+ supplemental_columns_file = os.path.join(build_dir,
+ results_dashboard.CACHE_DIR,
+                                           supplemental_columns_file_name)
+ if os.path.exists(supplemental_columns_file):
+ with file(supplemental_columns_file, 'r') as f:
+ supplemental_columns = json.loads(f.read())
+ return supplemental_columns
+
+
+def _ResultsDashboardDict(options):
+ """Generates a dict of info needed by the results dashboard.
+
+ Args:
+ options: Program arguments.
+
+ Returns:
+ dict containing data the dashboard needs.
+ """
+ build_dir = os.path.abspath(options.build_dir)
+ supplemental_columns = _GetSupplementalColumns(
+ build_dir, options.supplemental_columns_file)
+ extra_columns = options.perf_config
+ if extra_columns:
+ supplemental_columns.update(extra_columns)
+ fields = {
+ 'system': _GetPerfID(options),
+ 'test': options.test_type,
+ 'url': options.results_url,
+ 'mastername': options.build_properties.get('mastername'),
+ 'buildername': options.build_properties.get('buildername'),
+ 'buildnumber': options.build_properties.get('buildnumber'),
+ 'build_dir': build_dir,
+ 'supplemental_columns': supplemental_columns,
+ 'revisions': _GetTelemetryRevisions(options),
+ }
+ return fields
+
+
+def _GenerateDashboardJson(log_processor, args):
+ """Generates chartjson to send to the dashboard.
+
+ Args:
+ log_processor: An instance of a log processor class, which has been used to
+ process the test output, so it contains the test results.
+ args: Dict of additional args to send to results_dashboard.
+ """
+ assert log_processor.IsChartJson()
+
+ chart_json = log_processor.ChartJson()
+ if chart_json:
+ return results_dashboard.MakeDashboardJsonV1(
+ chart_json,
+ args['revisions'], args['system'], args['mastername'],
+ args['buildername'], args['buildnumber'],
+ args['supplemental_columns'], log_processor.IsReferenceBuild())
+ return None
+
+
+def _WriteLogProcessorResultsToOutput(log_processor, log_output_file):
+ """Writes the log processor's results to a file.
+
+ Args:
+    log_processor: An instance of a log processor class, which has been used to
+      process the test output, so it contains the test results.
+    log_output_file: Path to the file to write the results to.
+ """
+ with open(log_output_file, 'w') as f:
+ results = {
+ 'passed': log_processor.PassedTests(),
+ 'failed': log_processor.FailedTests(),
+ 'flakes': log_processor.FlakyTests(),
+ }
+ json.dump(results, f)
+
+
+def _WriteChartJsonToOutput(chartjson_file, log_processor, args):
+ """Writes the dashboard chartjson to a file for display in the waterfall.
+
+ Args:
+ chartjson_file: Path to the file to write the chartjson.
+ log_processor: An instance of a log processor class, which has been used to
+ process the test output, so it contains the test results.
+ args: Dict of additional args to send to results_dashboard.
+ """
+ assert log_processor.IsChartJson()
+
+ chartjson_data = _GenerateDashboardJson(log_processor, args)
+
+ with open(chartjson_file, 'w') as f:
+ json.dump(chartjson_data, f)
+
+
+def _SendResultsToDashboard(log_processor, args):
+ """Sends results from a log processor instance to the dashboard.
+
+ Args:
+ log_processor: An instance of a log processor class, which has been used to
+ process the test output, so it contains the test results.
+ args: Dict of additional args to send to results_dashboard.
+
+ Returns:
+ True if no errors occurred.
+ """
+ if args['system'] is None:
+ # perf_id not specified in factory properties.
+ print 'Error: No system name (perf_id) specified when sending to dashboard.'
+ return True
+
+ results = None
+ if log_processor.IsChartJson():
+ results = _GenerateDashboardJson(log_processor, args)
+ if not results:
+ print 'Error: No json output from telemetry.'
+ print '@@@STEP_FAILURE@@@'
+ log_processor.Cleanup()
+ else:
+ charts = _GetDataFromLogProcessor(log_processor)
+ results = results_dashboard.MakeListOfPoints(
+ charts, args['system'], args['test'], args['mastername'],
+ args['buildername'], args['buildnumber'], args['supplemental_columns'])
+
+ if not results:
+ return False
+
+ logging.debug(json.dumps(results, indent=2))
+ return results_dashboard.SendResults(results, args['url'], args['build_dir'])
+
+
+def _GetDataFromLogProcessor(log_processor):
+ """Returns a mapping of chart names to chart data.
+
+ Args:
+ log_processor: A log processor (aka results tracker) object.
+
+ Returns:
+    A dictionary mapping chart names to chart data dictionaries,
+    put together in log_processor. Each chart data dictionary contains:
+ "traces": A dictionary mapping trace names to value, stddev pairs.
+ "units": Units for the chart.
+ "rev": A revision number or git hash.
+ Plus other revision keys, e.g. webkit_rev, ver, v8_rev.
+ """
+ charts = {}
+ for log_file_name, line_list in log_processor.PerformanceLogs().iteritems():
+ if not log_file_name.endswith('-summary.dat'):
+ # The log processor data also contains "graphs list" file contents,
+ # which we can ignore.
+ continue
+ chart_name = log_file_name.replace('-summary.dat', '')
+
+ # It's assumed that the log lines list has length one, because for each
+ # graph name only one line is added in log_processor in the method
+ # GraphingLogProcessor._CreateSummaryOutput.
+ if len(line_list) != 1:
+ print 'Error: Unexpected log processor line list: %s' % str(line_list)
+ continue
+ line = line_list[0].rstrip()
+ try:
+ charts[chart_name] = json.loads(line)
+ except ValueError:
+ print 'Error: Could not parse JSON: %s' % line
+ return charts
+
+
+def _BuildCoverageGtestExclusions(options, args):
+ """Appends a list of GTest exclusion filters to the args list."""
+ gtest_exclusions = {
+ 'win32': {
+ 'browser_tests': (
+ 'ChromeNotifierDelegateBrowserTest.ClickTest',
+ 'ChromeNotifierDelegateBrowserTest.ButtonClickTest',
+ 'SyncFileSystemApiTest.GetFileStatuses',
+ 'SyncFileSystemApiTest.WriteFileThenGetUsage',
+ 'NaClExtensionTest.HostedApp',
+ 'MediaGalleriesPlatformAppBrowserTest.MediaGalleriesCopyToNoAccess',
+ 'PlatformAppBrowserTest.ComponentAppBackgroundPage',
+ 'BookmarksTest.CommandAgainGoesBackToBookmarksTab',
+ 'NotificationBitmapFetcherBrowserTest.OnURLFetchFailureTest',
+ 'PreservedWindowPlacementIsMigrated.Test',
+ 'ShowAppListBrowserTest.ShowAppListFlag',
+ '*AvatarMenuButtonTest.*',
+ 'NotificationBitmapFetcherBrowserTest.HandleImageFailedTest',
+ 'NotificationBitmapFetcherBrowserTest.OnImageDecodedTest',
+ 'NotificationBitmapFetcherBrowserTest.StartTest',
+ )
+ },
+ 'darwin2': {},
+ 'linux2': {},
+ }
+ gtest_exclusion_filters = []
+ if sys.platform in gtest_exclusions:
+ excldict = gtest_exclusions.get(sys.platform)
+ if options.test_type in excldict:
+ gtest_exclusion_filters = excldict[options.test_type]
+ args.append('--gtest_filter=-' + ':'.join(gtest_exclusion_filters))
+
+
+def _UploadProfilingData(options, args):
+ """Archives profiling data to Google Storage."""
+ # args[1] has --gtest-filter argument.
+ if len(args) < 2:
+ return 0
+
+ builder_name = options.build_properties.get('buildername')
+ if ((builder_name != 'XP Perf (dbg) (2)' and
+ builder_name != 'Linux Perf (lowmem)') or
+ options.build_properties.get('mastername') != 'chromium.perf' or
+ not options.build_properties.get('got_revision')):
+ return 0
+
+ gtest_filter = args[1]
+ if gtest_filter is None:
+ return 0
+ gtest_name = ''
+ if gtest_filter.find('StartupTest.*') > -1:
+ gtest_name = 'StartupTest'
+ else:
+ return 0
+
+ build_dir = os.path.normpath(os.path.abspath(options.build_dir))
+
+ # archive_profiling_data.py is in /b/build/scripts/slave and
+ # build_dir is /b/build/slave/SLAVE_NAME/build/src/build.
+ profiling_archive_tool = os.path.join(build_dir, '..', '..', '..', '..', '..',
+ 'scripts', 'slave',
+ 'archive_profiling_data.py')
+
+ if sys.platform == 'win32':
+ python = 'python_slave'
+ else:
+ python = 'python'
+
+ revision = options.build_properties.get('got_revision')
+ cmd = [python, profiling_archive_tool, '--revision', revision,
+ '--builder-name', builder_name, '--test-name', gtest_name]
+
+ return chromium_utils.RunCommand(cmd)
+
+
+def _UploadGtestJsonSummary(json_path, build_properties, test_exe, step_name):
+ """Archives GTest results to Google Storage.
+
+ Args:
+ json_path: path to the json-format output of the gtest.
+ build_properties: the build properties of a build in buildbot.
+ test_exe: the name of the gtest executable.
+ step_name: the name of the buildbot step running the gtest.
+ """
+ if not os.path.exists(json_path):
+ return
+
+ orig_json_data = 'invalid'
+ try:
+ with open(json_path) as orig_json:
+ orig_json_data = json.load(orig_json)
+ except ValueError:
+ pass
+
+ target_json = {
+ # Increment the version number when making incompatible changes
+ # to the layout of this dict. This way clients can recognize different
+ # formats instead of guessing.
+ 'version': 1,
+ 'timestamp': str(datetime.datetime.now()),
+ 'test_exe': test_exe,
+ 'build_properties': build_properties,
+ 'gtest_results': orig_json_data,
+ }
+ target_json_serialized = json.dumps(target_json, indent=2)
+
+ now = datetime.datetime.utcnow()
+ today = now.date()
+ weekly_timestamp = today - datetime.timedelta(days=today.weekday())
+
+ # Pick a non-colliding file name by hashing the JSON contents
+ # (build metadata should be different from build to build).
+ target_name = hashlib.sha1(target_json_serialized).hexdigest()
+
+ # Use a directory structure that makes it easy to filter by year,
+ # month, week and day based just on the file path.
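+  # e.g. a run on 2015-07-02 (whose week starts Monday 2015-06-29) goes to
+  # gs://chrome-gtest-results/raw/2015/6/29/2/<sha1>.json.gz.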
+ date_json_gs_path = 'gs://chrome-gtest-results/raw/%d/%d/%d/%d/%s.json.gz' % (
+ weekly_timestamp.year,
+ weekly_timestamp.month,
+ weekly_timestamp.day,
+ today.day,
+ target_name)
+
+ # Use a directory structure so that the json results could be indexed by
+ # master_name/builder_name/build_number/step_name.
+ master_name = build_properties.get('mastername')
+ builder_name = build_properties.get('buildername')
+ build_number = build_properties.get('buildnumber')
+ buildbot_json_gs_path = ''
+ if (master_name and builder_name and
+ (build_number is not None and build_number != '') and step_name):
+ # build_number could be zero.
+ buildbot_json_gs_path = (
+ 'gs://chrome-gtest-results/buildbot/%s/%s/%d/%s.json.gz' % (
+ master_name,
+ builder_name,
+ build_number,
+ step_name))
+
+ fd, target_json_path = tempfile.mkstemp()
+ try:
+ with os.fdopen(fd, 'w') as f:
+ with gzip.GzipFile(fileobj=f, compresslevel=9) as gzipf:
+ gzipf.write(target_json_serialized)
+
+ slave_utils.GSUtilCopy(target_json_path, date_json_gs_path)
+ if buildbot_json_gs_path:
+ slave_utils.GSUtilCopy(target_json_path, buildbot_json_gs_path)
+ finally:
+ os.remove(target_json_path)
+
+ if target_json['gtest_results'] == 'invalid':
+ return
+
+ # Use a directory structure that makes it easy to filter by year,
+ # month, week and day based just on the file path.
+ bigquery_json_gs_path = (
+ 'gs://chrome-gtest-results/bigquery/%d/%d/%d/%d/%s.json.gz' % (
+ weekly_timestamp.year,
+ weekly_timestamp.month,
+ weekly_timestamp.day,
+ today.day,
+ target_name))
+
+ fd, bigquery_json_path = tempfile.mkstemp()
+ try:
+ with os.fdopen(fd, 'w') as f:
+ with gzip.GzipFile(fileobj=f, compresslevel=9) as gzipf:
+ for iteration_data in (
+ target_json['gtest_results']['per_iteration_data']):
+ for test_name, test_runs in iteration_data.iteritems():
+ # Compute the number of flaky failures. A failure is only considered
+ # flaky, when the test succeeds at least once on the same code.
+ # However, we do not consider a test flaky if it only changes
+ # between various failure states, e.g. FAIL and TIMEOUT.
+ num_successes = len([r['status'] for r in test_runs
+ if r['status'] == 'SUCCESS'])
+ num_failures = len(test_runs) - num_successes
+ if num_failures > 0 and num_successes > 0:
+ flaky_failures = num_failures
+ else:
+ flaky_failures = 0
+
+ for run_index, run_data in enumerate(test_runs):
+ row = {
+ 'test_name': test_name,
+ 'run_index': run_index,
+ 'elapsed_time_ms': run_data['elapsed_time_ms'],
+ 'status': run_data['status'],
+ 'test_exe': target_json['test_exe'],
+ 'global_tags': target_json['gtest_results']['global_tags'],
+ 'slavename':
+ target_json['build_properties'].get('slavename', ''),
+ 'buildername':
+ target_json['build_properties'].get('buildername', ''),
+ 'mastername':
+ target_json['build_properties'].get('mastername', ''),
+ 'raw_json_gs_path': date_json_gs_path,
+ 'timestamp': now.strftime('%Y-%m-%d %H:%M:%S.%f'),
+ 'flaky_failures': flaky_failures,
+ 'num_successes': num_successes,
+ 'num_failures': num_failures
+ }
+ gzipf.write(json.dumps(row) + '\n')
+
+ slave_utils.GSUtilCopy(bigquery_json_path, bigquery_json_gs_path)
+ finally:
+ os.remove(bigquery_json_path)
+
+
+def _GenerateRunIsolatedCommand(build_dir, test_exe_path, options, command):
+ """Converts the command to run through the run isolate script.
+
+ All commands are sent through the run isolated script, in case
+ they need to be run in isolate mode.
+ """
+ run_isolated_test = os.path.join(BASE_DIR, 'runisolatedtest.py')
+ isolate_command = [
+ sys.executable, run_isolated_test,
+ '--test_name', options.test_type,
+ '--builder_name', options.build_properties.get('buildername', ''),
+ '--checkout_dir', os.path.dirname(os.path.dirname(build_dir)),
+ ]
+ if options.factory_properties.get('force_isolated'):
+ isolate_command += ['--force-isolated']
+ isolate_command += [test_exe_path, '--'] + command
+
+ return isolate_command
+
+
+def _GetPerfID(options):
+ if options.perf_id:
+ perf_id = options.perf_id
+ else:
+ perf_id = options.factory_properties.get('perf_id')
+ if options.factory_properties.get('add_perf_id_suffix'):
+ perf_id += options.build_properties.get('perf_id_suffix')
+ return perf_id
+
+
+def _GetSanitizerSymbolizeCommand(strip_path_prefix=None, json_file_name=None):
+ script_path = os.path.abspath(os.path.join('src', 'tools', 'valgrind',
+ 'asan', 'asan_symbolize.py'))
+ command = [sys.executable, script_path]
+ if strip_path_prefix:
+ command.append(strip_path_prefix)
+ if json_file_name:
+ command.append('--test-summary-json-file=%s' % json_file_name)
+ return command
+
+
+def _SymbolizeSnippetsInJSON(options, json_file_name):
+ if not json_file_name:
+ return
+ symbolize_command = _GetSanitizerSymbolizeCommand(
+ strip_path_prefix=options.strip_path_prefix,
+ json_file_name=json_file_name)
+ try:
+ p = subprocess.Popen(symbolize_command, stderr=subprocess.PIPE)
+ (_, stderr) = p.communicate()
+  except OSError as e:
+    print 'Exception while symbolizing snippets: %s' % e
+    return
+
+ if p.returncode != 0:
+ print "Error: failed to symbolize snippets in JSON:\n"
+ print stderr
+
+
+def _MainParse(options, _args):
+ """Run input through annotated test parser.
+
+ This doesn't execute a test, but reads test input from a file and runs it
+ through the specified annotation parser (aka log processor).
+ """
+ if not options.annotate:
+ raise chromium_utils.MissingArgument('--parse-input doesn\'t make sense '
+ 'without --annotate.')
+
+ # If --annotate=list was passed, list the log processor classes and exit.
+ if _ListLogProcessors(options.annotate):
+ return 0
+
+ log_processor_class = _SelectLogProcessor(options, False)
+ log_processor = _CreateLogProcessor(log_processor_class, options, None)
+
+ if options.generate_json_file:
+ if os.path.exists(options.test_output_xml):
+ # remove the old XML output file.
+ os.remove(options.test_output_xml)
+
+ if options.parse_input == '-':
+ f = sys.stdin
+ else:
+ try:
+ f = open(options.parse_input, 'rb')
+ except IOError as e:
+ print 'Error %d opening \'%s\': %s' % (e.errno, options.parse_input,
+ e.strerror)
+ return 1
+
+ with f:
+ for line in f:
+ log_processor.ProcessLine(line)
+
+ if options.generate_json_file:
+ if not _GenerateJSONForTestResults(options, log_processor):
+ return 1
+
+ if options.annotate:
+ annotation_utils.annotate(
+ options.test_type, options.parse_result, log_processor,
+ perf_dashboard_id=options.perf_dashboard_id)
+
+ return options.parse_result
+
+
+def _MainMac(options, args, extra_env):
+ """Runs the test on mac."""
+ if len(args) < 1:
+ raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
+
+ telemetry_info = _UpdateRunBenchmarkArgs(args, options)
+ test_exe = args[0]
+ if options.run_python_script:
+ build_dir = os.path.normpath(os.path.abspath(options.build_dir))
+ test_exe_path = test_exe
+ else:
+ build_dir = os.path.normpath(os.path.abspath(options.build_dir))
+ test_exe_path = os.path.join(build_dir, options.target, test_exe)
+
+  # Nuke anything that appears to be a stale Chrome item left in the temporary
+  # directory by previous test runs (e.g. from crashes or unittest leaks).
+ slave_utils.RemoveChromeTemporaryFiles()
+
+ if options.run_shell_script:
+ command = ['bash', test_exe_path]
+ elif options.run_python_script:
+ command = [sys.executable, test_exe]
+ else:
+ command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
+ command.extend(args[1:])
+
+ # If --annotate=list was passed, list the log processor classes and exit.
+ if _ListLogProcessors(options.annotate):
+ return 0
+ log_processor_class = _SelectLogProcessor(options, bool(telemetry_info))
+ log_processor = _CreateLogProcessor(
+ log_processor_class, options, telemetry_info)
+
+ if options.generate_json_file:
+ if os.path.exists(options.test_output_xml):
+ # remove the old XML output file.
+ os.remove(options.test_output_xml)
+
+ try:
+ if _UsingGtestJson(options):
+ json_file_name = log_processor.PrepareJSONFile(
+ options.test_launcher_summary_output)
+ command.append('--test-launcher-summary-output=%s' % json_file_name)
+
+ pipes = []
+ if options.use_symbolization_script:
+ pipes = [_GetSanitizerSymbolizeCommand()]
+
+ command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
+ command)
+ result = _RunGTestCommand(options, command, extra_env, pipes=pipes,
+ log_processor=log_processor)
+ finally:
+ if _UsingGtestJson(options):
+ _UploadGtestJsonSummary(json_file_name,
+ options.build_properties,
+ test_exe,
+ options.step_name)
+ log_processor.ProcessJSONFile(options.build_dir)
+
+ if options.generate_json_file:
+ if not _GenerateJSONForTestResults(options, log_processor):
+ return 1
+
+ if options.annotate:
+ annotation_utils.annotate(
+ options.test_type, result, log_processor,
+ perf_dashboard_id=options.perf_dashboard_id)
+
+ if options.chartjson_file and telemetry_info:
+ _WriteChartJsonToOutput(options.chartjson_file,
+ log_processor,
+ _ResultsDashboardDict(options))
+
+ if options.results_url:
+ if not _SendResultsToDashboard(
+ log_processor, _ResultsDashboardDict(options)):
+ return 1
+
+ return result
+
+
+def _MainIOS(options, args, extra_env):
+ """Runs the test on iOS."""
+ if len(args) < 1:
+ raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
+
+ def kill_simulator():
+ chromium_utils.RunCommand(['/usr/bin/killall', 'iPhone Simulator'])
+
+  # For iOS tests, the args come in the following order:
+ # [0] test display name formatted as 'test_name (device[ ios_version])'
+ # [1:] gtest args (e.g. --gtest_print_time)
+
+ # Set defaults in case the device family and iOS version can't be parsed out
+ # of |args|
+ device = 'iPhone Retina (4-inch)'
+ ios_version = '7.1'
+
+ # Parse the test_name and device from the test display name.
+ # The expected format is: <test_name> (<device>)
+ result = re.match(r'(.*) \((.*)\)$', args[0])
+ if result is not None:
+ test_name, device = result.groups()
+ # Check if the device has an iOS version. The expected format is:
+ # <device_name><space><ios_version>, where ios_version may have 2 or 3
+ # numerals (e.g. '4.3.11' or '5.0').
+ result = re.match(r'(.*) (\d+\.\d+(\.\d+)?)$', device)
+ if result is not None:
+ device = result.groups()[0]
+ ios_version = result.groups()[1]
+ else:
+ # If first argument is not in the correct format, log a warning but
+ # fall back to assuming the first arg is the test_name and just run
+ # on the iphone simulator.
+ test_name = args[0]
+ print ('Can\'t parse test name, device, and iOS version. '
+ 'Running %s on %s %s' % (test_name, device, ios_version))
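+  # For example (hypothetical input), args[0] == 'base_unittests (iPhone 5 7.1)'
+  # parses into test_name='base_unittests', device='iPhone 5' and
+  # ios_version='7.1'.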
+
+ # Build the args for invoking iossim, which will install the app on the
+ # simulator and launch it, then dump the test results to stdout.
+
+ build_dir = os.path.normpath(os.path.abspath(options.build_dir))
+ app_exe_path = os.path.join(
+ build_dir, options.target + '-iphonesimulator', test_name + '.app')
+ test_exe_path = os.path.join(
+ build_dir, 'ninja-iossim', options.target, 'iossim')
+ tmpdir = tempfile.mkdtemp()
+ command = [test_exe_path,
+ '-d', device,
+ '-s', ios_version,
+ '-t', '120',
+ '-u', tmpdir,
+ app_exe_path, '--'
+ ]
+ command.extend(args[1:])
+
+ # If --annotate=list was passed, list the log processor classes and exit.
+ if _ListLogProcessors(options.annotate):
+ return 0
+ log_processor = _CreateLogProcessor(
+ LOG_PROCESSOR_CLASSES['gtest'], options, None)
+
+ # Make sure the simulator isn't running.
+ kill_simulator()
+
+  # Nuke anything that appears to be a stale Chrome item in the temporary
+  # directory from previous test runs (e.g. from crashes or unittest leaks).
+ slave_utils.RemoveChromeTemporaryFiles()
+
+ dirs_to_cleanup = [tmpdir]
+ crash_files_before = set([])
+ crash_files_after = set([])
+ crash_files_before = set(crash_utils.list_crash_logs())
+
+ result = _RunGTestCommand(options, command, extra_env, log_processor)
+
+ # Because test apps kill themselves, iossim sometimes returns non-zero
+ # status even though all tests have passed. Check the log_processor to
+ # see if the test run was successful.
+ if log_processor.CompletedWithoutFailure():
+ result = 0
+ else:
+ result = 1
+
+ if result != 0:
+ crash_utils.wait_for_crash_logs()
+ crash_files_after = set(crash_utils.list_crash_logs())
+
+ kill_simulator()
+
+ new_crash_files = crash_files_after.difference(crash_files_before)
+ crash_utils.print_new_crash_files(new_crash_files)
+
+ for a_dir in dirs_to_cleanup:
+ try:
+ chromium_utils.RemoveDirectory(a_dir)
+ except OSError as e:
+ print >> sys.stderr, e
+ # Don't fail.
+
+ return result
+
+
+def _MainLinux(options, args, extra_env):
+ """Runs the test on Linux."""
+ import platform
+ xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..',
+ 'third_party', 'xvfb', platform.architecture()[0])
+
+ if len(args) < 1:
+ raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
+
+ build_dir = os.path.normpath(os.path.abspath(options.build_dir))
+ if options.slave_name:
+ slave_name = options.slave_name
+ else:
+ slave_name = slave_utils.SlaveBuildName(build_dir)
+ bin_dir = os.path.join(build_dir, options.target)
+
+ # Figure out what we want for a special frame buffer directory.
+ special_xvfb_dir = None
+ fp_chromeos = options.factory_properties.get('chromeos', None)
+ if (fp_chromeos or
+ slave_utils.GypFlagIsOn(options, 'use_aura') or
+ slave_utils.GypFlagIsOn(options, 'chromeos')):
+ special_xvfb_dir = xvfb_path
+
+ telemetry_info = _UpdateRunBenchmarkArgs(args, options)
+ test_exe = args[0]
+ if options.run_python_script:
+ test_exe_path = test_exe
+ else:
+ test_exe_path = os.path.join(bin_dir, test_exe)
+ if not os.path.exists(test_exe_path):
+ if options.factory_properties.get('succeed_on_missing_exe', False):
+ print '%s missing but succeed_on_missing_exe used, exiting' % (
+ test_exe_path)
+ return 0
+ msg = 'Unable to find %s' % test_exe_path
+ raise chromium_utils.PathNotFound(msg)
+
+ # Unset http_proxy and HTTPS_PROXY environment variables. When set, this
+ # causes some tests to hang. See http://crbug.com/139638 for more info.
+ if 'http_proxy' in os.environ:
+ del os.environ['http_proxy']
+ print 'Deleted http_proxy environment variable.'
+ if 'HTTPS_PROXY' in os.environ:
+ del os.environ['HTTPS_PROXY']
+ print 'Deleted HTTPS_PROXY environment variable.'
+
+ # Path to SUID sandbox binary. This must be installed on all bots.
+ extra_env['CHROME_DEVEL_SANDBOX'] = CHROME_SANDBOX_PATH
+
+  # Nuke anything that appears to be a stale Chrome item in the temporary
+  # directory from previous test runs (e.g. from crashes or unittest leaks).
+ slave_utils.RemoveChromeTemporaryFiles()
+
+ extra_env['LD_LIBRARY_PATH'] = ''
+
+ if options.enable_lsan:
+ # Use the debug version of libstdc++ under LSan. If we don't, there will be
+ # a lot of incomplete stack traces in the reports.
+ extra_env['LD_LIBRARY_PATH'] += '/usr/lib/x86_64-linux-gnu/debug:'
+
+ extra_env['LD_LIBRARY_PATH'] += '%s:%s/lib:%s/lib.target' % (bin_dir, bin_dir,
+ bin_dir)
+
+ if options.run_shell_script:
+ command = ['bash', test_exe_path]
+ elif options.run_python_script:
+ command = [sys.executable, test_exe]
+ else:
+ command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
+ command.extend(args[1:])
+
+ # If --annotate=list was passed, list the log processor classes and exit.
+ if _ListLogProcessors(options.annotate):
+ return 0
+ log_processor_class = _SelectLogProcessor(options, bool(telemetry_info))
+ log_processor = _CreateLogProcessor(
+ log_processor_class, options, telemetry_info)
+
+ if options.generate_json_file:
+ if os.path.exists(options.test_output_xml):
+ # remove the old XML output file.
+ os.remove(options.test_output_xml)
+
+ try:
+ start_xvfb = False
+ json_file_name = None
+
+ # TODO(dpranke): checking on test_exe is a temporary hack until we
+ # can change the buildbot master to pass --xvfb instead of --no-xvfb
+ # for these two steps. See
+ # https://code.google.com/p/chromium/issues/detail?id=179814
+ start_xvfb = (options.xvfb or
+ 'layout_test_wrapper' in test_exe or
+ 'devtools_perf_test_wrapper' in test_exe)
+ if start_xvfb:
+ xvfb.StartVirtualX(
+ slave_name, bin_dir,
+ with_wm=(options.factory_properties.get('window_manager', 'True') ==
+ 'True'),
+ server_dir=special_xvfb_dir)
+
+ if _UsingGtestJson(options):
+ json_file_name = log_processor.PrepareJSONFile(
+ options.test_launcher_summary_output)
+ command.append('--test-launcher-summary-output=%s' % json_file_name)
+
+ pipes = []
+ # See the comment in main() regarding offline symbolization.
+ if options.use_symbolization_script:
+ symbolize_command = _GetSanitizerSymbolizeCommand(
+ strip_path_prefix=options.strip_path_prefix)
+ pipes = [symbolize_command]
+
+ command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
+ command)
+ result = _RunGTestCommand(options, command, extra_env, pipes=pipes,
+ log_processor=log_processor)
+ finally:
+ if start_xvfb:
+ xvfb.StopVirtualX(slave_name)
+ if _UsingGtestJson(options):
+ if options.use_symbolization_script:
+ _SymbolizeSnippetsInJSON(options, json_file_name)
+ if json_file_name:
+ _UploadGtestJsonSummary(json_file_name,
+ options.build_properties,
+ test_exe,
+ options.step_name)
+ log_processor.ProcessJSONFile(options.build_dir)
+
+ if options.generate_json_file:
+ if not _GenerateJSONForTestResults(options, log_processor):
+ return 1
+
+ if options.annotate:
+ annotation_utils.annotate(
+ options.test_type, result, log_processor,
+ perf_dashboard_id=options.perf_dashboard_id)
+
+ if options.chartjson_file and telemetry_info:
+ _WriteChartJsonToOutput(options.chartjson_file,
+ log_processor,
+ _ResultsDashboardDict(options))
+
+ if options.results_url:
+ if not _SendResultsToDashboard(
+ log_processor, _ResultsDashboardDict(options)):
+ return 1
+
+ return result
+
+
+def _MainWin(options, args, extra_env):
+ """Runs tests on windows.
+
+ Using the target build configuration, run the executable given in the
+ first non-option argument, passing any following arguments to that
+ executable.
+
+ Args:
+ options: Command-line options for this invocation of runtest.py.
+ args: Command and arguments for the test.
+ extra_env: A dictionary of extra environment variables to set.
+
+ Returns:
+ Exit status code.
+ """
+ if len(args) < 1:
+ raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
+
+ telemetry_info = _UpdateRunBenchmarkArgs(args, options)
+ test_exe = args[0]
+ build_dir = os.path.abspath(options.build_dir)
+ if options.run_python_script:
+ test_exe_path = test_exe
+ else:
+ test_exe_path = os.path.join(build_dir, options.target, test_exe)
+
+ if not os.path.exists(test_exe_path):
+ if options.factory_properties.get('succeed_on_missing_exe', False):
+ print '%s missing but succeed_on_missing_exe used, exiting' % (
+ test_exe_path)
+ return 0
+ raise chromium_utils.PathNotFound('Unable to find %s' % test_exe_path)
+
+ if options.run_python_script:
+ command = [sys.executable, test_exe]
+ else:
+ command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
+
+ command.extend(args[1:])
+
+  # Nuke anything that appears to be a stale Chrome item in the temporary
+  # directory from previous test runs (e.g. from crashes or unittest leaks).
+ slave_utils.RemoveChromeTemporaryFiles()
+
+ # If --annotate=list was passed, list the log processor classes and exit.
+ if _ListLogProcessors(options.annotate):
+ return 0
+ log_processor_class = _SelectLogProcessor(options, bool(telemetry_info))
+ log_processor = _CreateLogProcessor(
+ log_processor_class, options, telemetry_info)
+
+ if options.generate_json_file:
+ if os.path.exists(options.test_output_xml):
+ # remove the old XML output file.
+ os.remove(options.test_output_xml)
+
+ try:
+ if _UsingGtestJson(options):
+ json_file_name = log_processor.PrepareJSONFile(
+ options.test_launcher_summary_output)
+ command.append('--test-launcher-summary-output=%s' % json_file_name)
+
+ command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
+ command)
+ result = _RunGTestCommand(options, command, extra_env, log_processor)
+ finally:
+ if _UsingGtestJson(options):
+ _UploadGtestJsonSummary(json_file_name,
+ options.build_properties,
+ test_exe,
+ options.step_name)
+ log_processor.ProcessJSONFile(options.build_dir)
+
+ if options.generate_json_file:
+ if not _GenerateJSONForTestResults(options, log_processor):
+ return 1
+
+ if options.annotate:
+ annotation_utils.annotate(
+ options.test_type, result, log_processor,
+ perf_dashboard_id=options.perf_dashboard_id)
+
+ if options.chartjson_file and telemetry_info:
+ _WriteChartJsonToOutput(options.chartjson_file,
+ log_processor,
+ _ResultsDashboardDict(options))
+
+ if options.results_url:
+ if not _SendResultsToDashboard(
+ log_processor, _ResultsDashboardDict(options)):
+ return 1
+
+ return result
+
+
+def _MainAndroid(options, args, extra_env):
+ """Runs tests on android.
+
+ Running GTest-based tests on android is different than on Linux as it requires
+ src/build/android/test_runner.py to deploy and communicate with the device.
+ Python scripts are the same as with Linux.
+
+ Args:
+ options: Command-line options for this invocation of runtest.py.
+ args: Command and arguments for the test.
+ extra_env: A dictionary of extra environment variables to set.
+
+ Returns:
+ Exit status code.
+ """
+ if options.run_python_script:
+ return _MainLinux(options, args, extra_env)
+
+ if len(args) < 1:
+ raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
+
+ if _ListLogProcessors(options.annotate):
+ return 0
+ log_processor_class = _SelectLogProcessor(options, False)
+ log_processor = _CreateLogProcessor(log_processor_class, options, None)
+
+ if options.generate_json_file:
+ if os.path.exists(options.test_output_xml):
+ # remove the old XML output file.
+ os.remove(options.test_output_xml)
+
+ # Assume it's a gtest apk, so use the android harness.
+ test_suite = args[0]
+ run_test_target_option = '--release'
+ if options.target == 'Debug':
+ run_test_target_option = '--debug'
+ command = ['src/build/android/test_runner.py', 'gtest',
+ run_test_target_option, '-s', test_suite]
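+  # For example (hypothetical suite), a Debug build running 'base_unittests'
+  # produces the command ['src/build/android/test_runner.py', 'gtest',
+  # '--debug', '-s', 'base_unittests'].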
+
+ if options.flakiness_dashboard_server:
+ command += ['--flakiness-dashboard-server=%s' %
+ options.flakiness_dashboard_server]
+
+ result = _RunGTestCommand(
+ options, command, extra_env, log_processor=log_processor)
+
+ if options.generate_json_file:
+ if not _GenerateJSONForTestResults(options, log_processor):
+ return 1
+
+ if options.annotate:
+ annotation_utils.annotate(
+ options.test_type, result, log_processor,
+ perf_dashboard_id=options.perf_dashboard_id)
+
+ if options.results_url:
+ if not _SendResultsToDashboard(
+ log_processor, _ResultsDashboardDict(options)):
+ return 1
+
+ return result
+
+
+def _UpdateRunBenchmarkArgs(args, options):
+ """Updates the arguments for telemetry run_benchmark commands.
+
+  Adds an output argument so that chartjson results are written to a
+  temporary file.
+
+ Arguments:
+ args: list of command line arguments, starts with 'run_benchmark' for
+ telemetry tests.
+
+  Returns:
+    None if this is not a telemetry test, an empty dict if --chartjson-file
+    was not given, otherwise a dict with the output filename, whether this is
+    a reference build, and whether the output directory needs cleanup.
+ """
+ if not options.chartjson_file:
+ return {}
+
+ if args[0].endswith('run_benchmark'):
+ is_ref = '--browser=reference' in args
+ output_dir = tempfile.mkdtemp()
+ args.extend(['--output-dir=%s' % output_dir])
+ temp_filename = os.path.join(output_dir, 'results-chart.json')
+ return {'filename': temp_filename, 'is_ref': is_ref, 'cleanup_dir': True}
+ elif args[0].endswith('test_runner.py'):
+ (_, temp_json_filename) = tempfile.mkstemp()
+ args.extend(['--output-chartjson-data=%s' % temp_json_filename])
+ return {'filename': temp_json_filename,
+ 'is_ref': False,
+ 'cleanup_dir': False}
+
+ return None
+
+
+def _ConfigureSanitizerTools(options, args, extra_env):
+ if (options.enable_asan or options.enable_tsan or
+ options.enable_msan or options.enable_lsan):
+ # Instruct GTK to use malloc while running ASan, TSan, MSan or LSan tests.
+ extra_env['G_SLICE'] = 'always-malloc'
+ extra_env['NSS_DISABLE_ARENA_FREE_LIST'] = '1'
+ extra_env['NSS_DISABLE_UNLOAD'] = '1'
+
+ symbolizer_path = os.path.abspath(os.path.join('src', 'third_party',
+ 'llvm-build', 'Release+Asserts', 'bin', 'llvm-symbolizer'))
+ disable_sandbox_flag = '--no-sandbox'
+ if args and 'layout_test_wrapper' in args[0]:
+ disable_sandbox_flag = '--additional-drt-flag=%s' % disable_sandbox_flag
+
+ # Symbolization of sanitizer reports.
+ if sys.platform in ['win32', 'cygwin']:
+ # On Windows, the in-process symbolizer works even when sandboxed.
+ symbolization_options = []
+ elif options.enable_tsan or options.enable_lsan:
+ # TSan and LSan are not sandbox-compatible, so we can use online
+ # symbolization. In fact, they need symbolization to be able to apply
+ # suppressions.
+ symbolization_options = ['symbolize=1',
+ 'external_symbolizer_path=%s' % symbolizer_path,
+ 'strip_path_prefix=%s' % options.strip_path_prefix]
+ elif options.enable_asan or options.enable_msan:
+ # ASan and MSan use a script for offline symbolization.
+ # Important note: when running ASan or MSan with leak detection enabled,
+ # we must use the LSan symbolization options above.
+ symbolization_options = ['symbolize=0']
+ # Set the path to llvm-symbolizer to be used by asan_symbolize.py
+ extra_env['LLVM_SYMBOLIZER_PATH'] = symbolizer_path
+ options.use_symbolization_script = True
+
+ def AddToExistingEnv(env_dict, key, options_list):
+ # Adds a key to the supplied environment dictionary but appends it to
+ # existing environment variables if it already contains values.
+ assert type(env_dict) is dict
+ assert type(options_list) is list
+ env_dict[key] = ' '.join(filter(bool, [os.environ.get(key)]+options_list))
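+  # For example (hypothetical values), with ASAN_OPTIONS='fast_unwind=1'
+  # already in os.environ, AddToExistingEnv(extra_env, 'ASAN_OPTIONS',
+  # ['symbolize=0']) sets extra_env['ASAN_OPTIONS'] to
+  # 'fast_unwind=1 symbolize=0'.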
+
+ # ThreadSanitizer
+ if options.enable_tsan:
+ tsan_options = symbolization_options
+ AddToExistingEnv(extra_env, 'TSAN_OPTIONS', tsan_options)
+ # Disable sandboxing under TSan for now. http://crbug.com/223602.
+ args.append(disable_sandbox_flag)
+
+ # LeakSanitizer
+ if options.enable_lsan:
+ # Symbolization options set here take effect only for standalone LSan.
+ lsan_options = symbolization_options
+ AddToExistingEnv(extra_env, 'LSAN_OPTIONS', lsan_options)
+
+ # Disable sandboxing under LSan.
+ args.append(disable_sandbox_flag)
+
+ # AddressSanitizer
+ if options.enable_asan:
+ asan_options = symbolization_options
+ if options.enable_lsan:
+ asan_options += ['detect_leaks=1']
+ AddToExistingEnv(extra_env, 'ASAN_OPTIONS', asan_options)
+
+ # MemorySanitizer
+ if options.enable_msan:
+ msan_options = symbolization_options
+ if options.enable_lsan:
+ msan_options += ['detect_leaks=1']
+ AddToExistingEnv(extra_env, 'MSAN_OPTIONS', msan_options)
+
+
+def main():
+ """Entry point for runtest.py.
+
+ This function:
+ (1) Sets up the command-line options.
+ (2) Sets environment variables based on those options.
+ (3) Delegates to the platform-specific main functions.
+
+ Returns:
+ Exit code for this script.
+ """
+ option_parser = optparse.OptionParser(usage=USAGE)
+
+  # Since the trailing program to run may have command-line args of its
+ # own, we need to stop parsing when we reach the first positional argument.
+ option_parser.disable_interspersed_args()
+
+ option_parser.add_option('--target', default='Release',
+ help='build target (Debug or Release)')
+ option_parser.add_option('--pass-target', action='store_true', default=False,
+ help='pass --target to the spawned test script')
+ option_parser.add_option('--build-dir', help='ignored')
+ option_parser.add_option('--pass-build-dir', action='store_true',
+ default=False,
+ help='pass --build-dir to the spawned test script')
+ option_parser.add_option('--test-platform',
+ help='Platform to test on, e.g. ios-simulator')
+ option_parser.add_option('--total-shards', dest='total_shards',
+ default=None, type='int',
+ help='Number of shards to split this test into.')
+ option_parser.add_option('--shard-index', dest='shard_index',
+ default=None, type='int',
+ help='Shard to run. Must be between 1 and '
+ 'total-shards.')
+ option_parser.add_option('--run-shell-script', action='store_true',
+ default=False,
+ help='treat first argument as the shell script'
+                                ' to run.')
+ option_parser.add_option('--run-python-script', action='store_true',
+ default=False,
+ help='treat first argument as a python script'
+                                ' to run.')
+ option_parser.add_option('--generate-json-file', action='store_true',
+ default=False,
+ help='output JSON results file if specified.')
+ option_parser.add_option('--xvfb', action='store_true', dest='xvfb',
+ default=True,
+ help='Start virtual X server on Linux.')
+ option_parser.add_option('--no-xvfb', action='store_false', dest='xvfb',
+ help='Do not start virtual X server on Linux.')
+ option_parser.add_option('-o', '--results-directory', default='',
+ help='output results directory for JSON file.')
+ option_parser.add_option('--chartjson-file', default='',
+ help='File to dump chartjson results.')
+ option_parser.add_option('--log-processor-output-file', default='',
+ help='File to dump gtest log processor results.')
+ option_parser.add_option('--builder-name', default=None,
+ help='The name of the builder running this script.')
+ option_parser.add_option('--slave-name', default=None,
+ help='The name of the slave running this script.')
+ option_parser.add_option('--master-class-name', default=None,
+ help='The class name of the buildbot master running '
+ 'this script: examples include "Chromium", '
+ '"ChromiumWebkit", and "ChromiumGPU". The '
+ 'flakiness dashboard uses this value to '
+ 'categorize results. See buildershandler.py '
+ 'in the flakiness dashboard code '
+ '(use codesearch) for the known values. '
+ 'Defaults to fetching it from '
+ 'slaves.cfg/builders.pyl.')
+ option_parser.add_option('--build-number', default=None,
+ help=('The build number of the builder running'
+                                 ' this script.'))
+ option_parser.add_option('--step-name', default=None,
+ help=('The name of the step running this script.'))
+ option_parser.add_option('--test-type', default='',
+ help='The test name that identifies the test, '
+ 'e.g. \'unit-tests\'')
+ option_parser.add_option('--test-results-server', default='',
+ help='The test results server to upload the '
+ 'results.')
+ option_parser.add_option('--annotate', default='',
+ help='Annotate output when run as a buildstep. '
+ 'Specify which type of test to parse, available'
+ ' types listed with --annotate=list.')
+ option_parser.add_option('--parse-input', default='',
+ help='When combined with --annotate, reads test '
+ 'from a file instead of executing a test '
+ 'binary. Use - for stdin.')
+ option_parser.add_option('--parse-result', default=0,
+ help='Sets the return value of the simulated '
+ 'executable under test. Only has meaning when '
+ '--parse-input is used.')
+ option_parser.add_option('--results-url', default='',
+ help='The URI of the perf dashboard to upload '
+ 'results to.')
+ option_parser.add_option('--perf-dashboard-id', default='',
+ help='The ID on the perf dashboard to add results '
+ 'to.')
+ option_parser.add_option('--perf-id', default='',
+ help='The perf builder id')
+ option_parser.add_option('--perf-config', default='',
+ help='Perf configuration dictionary (as a string). '
+                                'This allows specifying custom revisions to '
+                                'be used as the main revision on the perf '
+                                'dashboard. '
+ 'Example: --perf-config="{\'a_default_rev\': '
+ '\'r_webrtc_rev\'}"')
+ option_parser.add_option('--supplemental-columns-file',
+ default='supplemental_columns',
+ help='A file containing a JSON blob with a dict '
+ 'that will be uploaded to the results '
+ 'dashboard as supplemental columns.')
+ option_parser.add_option('--revision',
+                           help='The revision number which is used as the '
+ 'primary key by the dashboard. If omitted it '
+ 'is automatically extracted from the checkout.')
+ option_parser.add_option('--webkit-revision',
+ help='See --revision.')
+ option_parser.add_option('--enable-asan', action='store_true', default=False,
+ help='Enable fast memory error detection '
+ '(AddressSanitizer).')
+ option_parser.add_option('--enable-lsan', action='store_true', default=False,
+ help='Enable memory leak detection (LeakSanitizer).')
+ option_parser.add_option('--enable-msan', action='store_true', default=False,
+ help='Enable uninitialized memory reads detection '
+ '(MemorySanitizer).')
+ option_parser.add_option('--enable-tsan', action='store_true', default=False,
+ help='Enable data race detection '
+ '(ThreadSanitizer).')
+ option_parser.add_option('--strip-path-prefix',
+ default='build/src/out/Release/../../',
+ help='Source paths in stack traces will be stripped '
+ 'of prefixes ending with this substring. This '
+ 'option is used by sanitizer tools.')
+ option_parser.add_option('--no-spawn-dbus', action='store_true',
+ default=False,
+ help='Disable GLib DBus bug workaround: '
+ 'manually spawning dbus-launch')
+ option_parser.add_option('--test-launcher-summary-output',
+ help='Path to test results file with all the info '
+ 'from the test launcher')
+ option_parser.add_option('--flakiness-dashboard-server',
+ help='The flakiness dashboard server to which the '
+ 'results should be uploaded.')
+ option_parser.add_option('--verbose', action='store_true', default=False,
+ help='Prints more information.')
+
+ chromium_utils.AddPropertiesOptions(option_parser)
+ options, args = option_parser.parse_args()
+
+ # Initialize logging.
+ log_level = logging.INFO
+ if options.verbose:
+ log_level = logging.DEBUG
+ logging.basicConfig(level=log_level,
+ format='%(asctime)s %(filename)s:%(lineno)-3d'
+ ' %(levelname)s %(message)s',
+ datefmt='%y%m%d %H:%M:%S')
+ logging.basicConfig(level=logging.DEBUG)
+ logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
+
+ if not options.perf_dashboard_id:
+ options.perf_dashboard_id = options.factory_properties.get('test_name')
+
+ options.test_type = options.test_type or options.factory_properties.get(
+ 'step_name', '')
+
+ if options.run_shell_script and options.run_python_script:
+ sys.stderr.write('Use either --run-shell-script OR --run-python-script, '
+ 'not both.')
+ return 1
+
+ print '[Running on builder: "%s"]' % options.builder_name
+
+ did_launch_dbus = False
+ if not options.no_spawn_dbus:
+ did_launch_dbus = _LaunchDBus()
+
+ try:
+ options.build_dir = build_directory.GetBuildOutputDirectory()
+
+ if options.pass_target and options.target:
+ args.extend(['--target', options.target])
+ if options.pass_build_dir:
+ args.extend(['--build-dir', options.build_dir])
+
+      # We will use this to accumulate overrides for the command under test
+      # that we may not need or want for other support commands.
+ extra_env = {}
+
+ # This option is used by sanitizer code. There is no corresponding command
+ # line flag.
+ options.use_symbolization_script = False
+ # Set up extra environment and args for sanitizer tools.
+ _ConfigureSanitizerTools(options, args, extra_env)
+
+ # Set the number of shards environment variables.
+ # NOTE: Chromium's test launcher will ignore these in favor of the command
+ # line flags passed in _BuildTestBinaryCommand.
+ if options.total_shards and options.shard_index:
+ extra_env['GTEST_TOTAL_SHARDS'] = str(options.total_shards)
+ extra_env['GTEST_SHARD_INDEX'] = str(options.shard_index - 1)
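+        # e.g. --total-shards=4 --shard-index=3 (hypothetical) would set
+        # GTEST_TOTAL_SHARDS='4' and GTEST_SHARD_INDEX='2'.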
+
+ # If perf config is passed via command line, parse the string into a dict.
+ if options.perf_config:
+ try:
+ options.perf_config = ast.literal_eval(options.perf_config)
+ assert type(options.perf_config) is dict, (
+ 'Value of --perf-config couldn\'t be evaluated into a dict.')
+ except (exceptions.SyntaxError, ValueError):
+ option_parser.error('Failed to parse --perf-config value into a dict: '
+ '%s' % options.perf_config)
+ return 1
+
+ # Allow factory property 'perf_config' as well during a transition period.
+ options.perf_config = (options.perf_config or
+ options.factory_properties.get('perf_config'))
+
+ if options.results_directory:
+ options.test_output_xml = os.path.normpath(os.path.abspath(os.path.join(
+ options.results_directory, '%s.xml' % options.test_type)))
+ args.append('--gtest_output=xml:' + options.test_output_xml)
+ elif options.generate_json_file:
+ option_parser.error(
+ '--results-directory is required with --generate-json-file=True')
+ return 1
+
+ if options.factory_properties.get('coverage_gtest_exclusions', False):
+ _BuildCoverageGtestExclusions(options, args)
+
+ temp_files = _GetTempCount()
+ if options.parse_input:
+ result = _MainParse(options, args)
+ elif sys.platform.startswith('darwin'):
+ test_platform = options.factory_properties.get(
+ 'test_platform', options.test_platform)
+ if test_platform in ('ios-simulator',):
+ result = _MainIOS(options, args, extra_env)
+ else:
+ result = _MainMac(options, args, extra_env)
+ elif sys.platform == 'win32':
+ result = _MainWin(options, args, extra_env)
+ elif sys.platform == 'linux2':
+ if options.factory_properties.get('test_platform',
+ options.test_platform) == 'android':
+ result = _MainAndroid(options, args, extra_env)
+ else:
+ result = _MainLinux(options, args, extra_env)
+ else:
+ sys.stderr.write('Unknown sys.platform value %s\n' % repr(sys.platform))
+ return 1
+
+ _UploadProfilingData(options, args)
+
+ new_temp_files = _GetTempCount()
+ if temp_files > new_temp_files:
+ print >> sys.stderr, (
+ 'Confused: %d files were deleted from %s during the test run') % (
+ (temp_files - new_temp_files), tempfile.gettempdir())
+ elif temp_files < new_temp_files:
+ print >> sys.stderr, (
+ '%d new files were left in %s: Fix the tests to clean up themselves.'
+ ) % ((new_temp_files - temp_files), tempfile.gettempdir())
+ # TODO(maruel): Make it an error soon. Not yet since I want to iron
+ # out all the remaining cases before.
+ #result = 1
+ return result
+ finally:
+ if did_launch_dbus:
+ # It looks like the command line argument --exit-with-session
+ # isn't working to clean up the spawned dbus-daemon. Kill it
+ # manually.
+ _ShutdownDBus()
+
+
+if '__main__' == __name__:
+ sys.exit(main())
diff --git a/infra/scripts/legacy/scripts/slave/slave_utils.py b/infra/scripts/legacy/scripts/slave/slave_utils.py
new file mode 100644
index 0000000..98435e1
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/slave_utils.py
@@ -0,0 +1,735 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions specific to build slaves, shared by several buildbot scripts.
+"""
+
+import datetime
+import glob
+import os
+import re
+import shutil
+import sys
+import tempfile
+import time
+
+from common import chromium_utils
+from slave.bootstrap import ImportMasterConfigs # pylint: disable=W0611
+from common.chromium_utils import GetActiveMaster # pylint: disable=W0611
+
+# These codes used to distinguish true errors from script warnings.
+ERROR_EXIT_CODE = 1
+WARNING_EXIT_CODE = 88
+
+
+# Local errors.
+class PageHeapError(Exception):
+ pass
+
+
+# Cache the path to gflags.exe.
+_gflags_exe = None
+
+
+def SubversionExe():
+ # TODO(pamg): move this into platform_utils to support Mac and Linux.
+ if chromium_utils.IsWindows():
+ return 'svn.bat' # Find it in the user's path.
+ elif chromium_utils.IsLinux() or chromium_utils.IsMac():
+ return 'svn' # Find it in the user's path.
+ else:
+ raise NotImplementedError(
+ 'Platform "%s" is not currently supported.' % sys.platform)
+
+
+def GitExe():
+ return 'git.bat' if chromium_utils.IsWindows() else 'git'
+
+
+def SubversionCat(wc_dir):
+ """Output the content of specified files or URLs in SVN.
+ """
+ try:
+ return chromium_utils.GetCommandOutput([SubversionExe(), 'cat',
+ wc_dir])
+ except chromium_utils.ExternalError:
+ return None
+
+
+class NotGitWorkingCopy(Exception): pass
+class NotSVNWorkingCopy(Exception): pass
+class NotAnyWorkingCopy(Exception): pass
+class InvalidSVNRevision(Exception): pass
+
+
+def ScrapeSVNInfoRevision(wc_dir, regexp):
+ """Runs 'svn info' on a working copy and applies the supplied regex and
+ returns the matched group as an int.
+ regexp can be either a compiled regex or a string regex.
+ throws NotSVNWorkingCopy if wc_dir is not in a working copy.
+ throws InvalidSVNRevision if matched group is not alphanumeric.
+ """
+ if isinstance(regexp, (str, unicode)):
+ regexp = re.compile(regexp)
+ retval, svn_info = chromium_utils.GetStatusOutput([SubversionExe(), 'info',
+ wc_dir])
+ if retval or 'is not a working copy' in svn_info:
+ raise NotSVNWorkingCopy(wc_dir)
+ match = regexp.search(svn_info)
+ if not match or not match.groups():
+ raise InvalidSVNRevision(
+ '%s did not match in svn info %s.' % (regexp.pattern, svn_info))
+ text = match.group(1)
+ if text.isalnum():
+ return int(text)
+ else:
+ raise InvalidSVNRevision(text)
+
+
+def SubversionRevision(wc_dir):
+ """Finds the last svn revision of a working copy and returns it as an int."""
+ return ScrapeSVNInfoRevision(wc_dir, r'(?s).*Revision: (\d+).*')
+
+
+def SubversionLastChangedRevision(wc_dir_or_file):
+ """Finds the last changed svn revision of a fs path returns it as an int."""
+ return ScrapeSVNInfoRevision(wc_dir_or_file,
+ r'(?s).*Last Changed Rev: (\d+).*')
+
+
+def GitHash(wc_dir):
+ """Finds the current commit hash of the wc_dir."""
+ retval, text = chromium_utils.GetStatusOutput(
+ [GitExe(), 'rev-parse', 'HEAD'], cwd=wc_dir)
+ if retval or 'fatal: Not a git repository' in text:
+ raise NotGitWorkingCopy(wc_dir)
+ return text.strip()
+
+
+def GetHashOrRevision(wc_dir):
+ """Gets the svn revision or git hash of wc_dir as a string. Throws
+  NotAnyWorkingCopy if neither is appropriate."""
+ try:
+ return str(SubversionRevision(wc_dir))
+ except NotSVNWorkingCopy:
+ pass
+ try:
+ return GitHash(wc_dir)
+ except NotGitWorkingCopy:
+ pass
+ raise NotAnyWorkingCopy(wc_dir)
+
+
+def GitOrSubversion(wc_dir):
+ """Returns the VCS for the given directory.
+
+ Returns:
+ 'svn' if the directory is a valid svn repo
+ 'git' if the directory is a valid git repo root
+ None otherwise
+ """
+ ret, out = chromium_utils.GetStatusOutput([SubversionExe(), 'info', wc_dir])
+ if not ret and 'is not a working copy' not in out:
+ return 'svn'
+
+ ret, out = chromium_utils.GetStatusOutput(
+ [GitExe(), 'rev-parse', '--is-inside-work-tree'], cwd=wc_dir)
+ if not ret and 'fatal: Not a git repository' not in out:
+ return 'git'
+
+ return None
+
+
+def GetBuildRevisions(src_dir, webkit_dir=None, revision_dir=None):
+ """Parses build revisions out of the provided directories.
+
+ Args:
+ src_dir: The source directory to be used to check the revision in.
+ webkit_dir: Optional WebKit directory, relative to src_dir.
+ revision_dir: If provided, this dir will be used for the build revision
+ instead of the mandatory src_dir.
+
+ Returns a tuple of the build revision and (optional) WebKit revision.
+ NOTICE: These revisions are strings, since they can be both Subversion numbers
+ and Git hashes.
+ """
+ abs_src_dir = os.path.abspath(src_dir)
+ webkit_revision = None
+ if webkit_dir:
+ webkit_dir = os.path.join(abs_src_dir, webkit_dir)
+ webkit_revision = GetHashOrRevision(webkit_dir)
+
+ if revision_dir:
+ revision_dir = os.path.join(abs_src_dir, revision_dir)
+ build_revision = GetHashOrRevision(revision_dir)
+ else:
+ build_revision = GetHashOrRevision(src_dir)
+ return (build_revision, webkit_revision)
+
+
+def GetZipFileNames(build_properties, build_revision, webkit_revision=None,
+ extract=False, use_try_buildnumber=True):
+ base_name = 'full-build-%s' % chromium_utils.PlatformName()
+
+ if 'try' in build_properties.get('mastername', '') and use_try_buildnumber:
+ if extract:
+ if not build_properties.get('parent_buildnumber'):
+ raise Exception('build_props does not have parent data: %s' %
+ build_properties)
+ version_suffix = '_%(parent_buildnumber)s' % build_properties
+ else:
+ version_suffix = '_%(buildnumber)s' % build_properties
+ elif webkit_revision:
+ version_suffix = '_wk%s_%s' % (webkit_revision, build_revision)
+ else:
+ version_suffix = '_%s' % build_revision
+
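+  # e.g. (hypothetical) build_revision='295912' with no webkit_revision
+  # produces ('full-build-<platform>', '_295912'), where <platform> comes
+  # from chromium_utils.PlatformName().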
+ return base_name, version_suffix
+
+
+def SlaveBuildName(chrome_dir):
+ """Extracts the build name of this slave (e.g., 'chrome-release') from the
+ leaf subdir of its build directory.
+ """
+ return os.path.basename(SlaveBaseDir(chrome_dir))
+
+
+def SlaveBaseDir(chrome_dir):
+ """Finds the full path to the build slave's base directory (e.g.
+ 'c:/b/chrome/chrome-release'). This is assumed to be the parent of the
+ shallowest 'build' directory in the chrome_dir path.
+
+ Raises chromium_utils.PathNotFound if there is no such directory.
+ """
+ result = ''
+ prev_dir = ''
+ curr_dir = chrome_dir
+ while prev_dir != curr_dir:
+ (parent, leaf) = os.path.split(curr_dir)
+ if leaf == 'build':
+ # Remember this one and keep looking for something shallower.
+ result = parent
+ if leaf == 'slave':
+ # We are too deep, stop now.
+ break
+ prev_dir = curr_dir
+ curr_dir = parent
+ if not result:
+ raise chromium_utils.PathNotFound('Unable to find slave base dir above %s' %
+ chrome_dir)
+ return result
+
+
+def GetStagingDir(start_dir):
+ """Creates a chrome_staging dir in the starting directory. and returns its
+ full path.
+ """
+ start_dir = os.path.abspath(start_dir)
+ staging_dir = os.path.join(SlaveBaseDir(start_dir), 'chrome_staging')
+ chromium_utils.MaybeMakeDirectory(staging_dir)
+ return staging_dir
+
+
+def SetPageHeap(chrome_dir, exe, enable):
+ """Enables or disables page-heap checking in the given executable, depending
+  on the 'enable' parameter. The gflags.exe tool is located automatically by
+  searching upward from chrome_dir.
+ """
+ global _gflags_exe
+ if _gflags_exe is None:
+ _gflags_exe = chromium_utils.FindUpward(chrome_dir,
+ 'tools', 'memory', 'gflags.exe')
+ command = [_gflags_exe]
+ if enable:
+ command.extend(['/p', '/enable', exe, '/full'])
+ else:
+ command.extend(['/p', '/disable', exe])
+ result = chromium_utils.RunCommand(command)
+ if result:
+ description = {True: 'enable', False: 'disable'}
+ raise PageHeapError('Unable to %s page heap for %s.' %
+ (description[enable], exe))
+
+
+def LongSleep(secs):
+ """A sleep utility for long durations that avoids appearing hung.
+
+ Sleeps for the specified duration. Prints output periodically so as not to
+ look hung in order to avoid being timed out. Since this function is meant
+ for long durations, it assumes that the caller does not care about losing a
+ small amount of precision.
+
+ Args:
+ secs: The time to sleep, in seconds.
+ """
+ secs_per_iteration = 60
+ time_slept = 0
+
+ # Make sure we are dealing with an integral duration, since this function is
+ # meant for long-lived sleeps we don't mind losing floating point precision.
+ secs = int(round(secs))
+
+ remainder = secs % secs_per_iteration
+ if remainder > 0:
+ time.sleep(remainder)
+ time_slept += remainder
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+ while time_slept < secs:
+ time.sleep(secs_per_iteration)
+ time_slept += secs_per_iteration
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+ sys.stdout.write('\n')
+
+
+def RunPythonCommandInBuildDir(build_dir, target, command_line_args,
+ server_dir=None, filter_obj=None):
+ if sys.platform == 'win32':
+ python_exe = 'python.exe'
+ else:
+ os.environ['PYTHONPATH'] = (chromium_utils.FindUpward(build_dir, 'tools',
+ 'python')
+ + ':' +os.environ.get('PYTHONPATH', ''))
+ python_exe = 'python'
+
+ command = [python_exe] + command_line_args
+ return chromium_utils.RunCommand(command, filter_obj=filter_obj)
+
+
+class RunCommandCaptureFilter(object):
+ lines = []
+
+ def FilterLine(self, in_line):
+ self.lines.append(in_line)
+ return None
+
+ def FilterDone(self, last_bits):
+ self.lines.append(last_bits)
+ return None
+
+
+def GypFlagIsOn(options, flag):
+ value = GetGypFlag(options, flag, False)
+ # The values we understand as Off are False and a text zero.
+ if value is False or value == '0':
+ return False
+ return True
+
+
+def GetGypFlag(options, flag, default=None):
+ gclient = options.factory_properties.get('gclient_env', {})
+ defines = gclient.get('GYP_DEFINES', '')
+ gypflags = dict([(a, c if b == '=' else True) for (a, b, c) in
+ [x.partition('=') for x in defines.split(' ')]])
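+  # e.g. (hypothetical) GYP_DEFINES='use_aura=1 component' parses to
+  # {'use_aura': '1', 'component': True}, so GetGypFlag(options, 'use_aura')
+  # returns '1' and GetGypFlag(options, 'component') returns True.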
+ if flag not in gypflags:
+ return default
+ return gypflags[flag]
+
+
+def GSUtilSetup():
+ # Get the path to the gsutil script.
+ gsutil = os.path.join(os.path.dirname(__file__), 'gsutil')
+ gsutil = os.path.normpath(gsutil)
+ if chromium_utils.IsWindows():
+ gsutil += '.bat'
+
+ # Get the path to the boto file containing the password.
+ boto_file = os.path.join(os.path.dirname(__file__), '..', '..', 'site_config',
+ '.boto')
+
+ # Make sure gsutil uses this boto file if it exists.
+ if os.path.exists(boto_file):
+ os.environ['AWS_CREDENTIAL_FILE'] = boto_file
+ os.environ['BOTO_CONFIG'] = boto_file
+ return gsutil
+
+
+def GSUtilGetMetadataField(name, provider_prefix=None):
+ """Returns: (str) the metadata field to use with Google Storage
+
+ The Google Storage specification for metadata can be found at:
+ https://developers.google.com/storage/docs/gsutil/addlhelp/WorkingWithObjectMetadata
+ """
+ # Already contains custom provider prefix
+ if name.lower().startswith('x-'):
+ return name
+
+ # See if it's innately supported by Google Storage
+ if name in (
+ 'Cache-Control',
+ 'Content-Disposition',
+ 'Content-Encoding',
+ 'Content-Language',
+ 'Content-MD5',
+ 'Content-Type',
+ ):
+ return name
+
+ # Add provider prefix
+ if not provider_prefix:
+ provider_prefix = 'x-goog-meta'
+ return '%s-%s' % (provider_prefix, name)
+
+
+def GSUtilCopy(source, dest, mimetype=None, gs_acl=None, cache_control=None,
+ metadata=None):
+ """Copy a file to Google Storage.
+
+ Runs the following command:
+ gsutil -h Content-Type:<mimetype> \
+ -h Cache-Control:<cache_control> \
+ cp -a <gs_acl> file://<filename> <gs_base>/<subdir>/<filename w/o path>
+
+ Args:
+ source: the source URI
+ dest: the destination URI
+ mimetype: optional value to add as a Content-Type header
+ gs_acl: optional value to add as a canned-acl
+ cache_control: optional value to set Cache-Control header
+ metadata: (dict) A dictionary of string key/value metadata entries to set
+ (see `gsutil cp' '-h' option)
+ Returns:
+ The status code returned from running the generated gsutil command.
+ """
+
+ if not source.startswith('gs://') and not source.startswith('file://'):
+ source = 'file://' + source
+ if not dest.startswith('gs://') and not dest.startswith('file://'):
+ dest = 'file://' + dest
+ gsutil = GSUtilSetup()
+ # Run the gsutil command. gsutil internally calls command_wrapper, which
+ # will try to run the command 10 times if it fails.
+ command = [gsutil]
+
+ if not metadata:
+ metadata = {}
+ if mimetype:
+ metadata['Content-Type'] = mimetype
+ if cache_control:
+ metadata['Cache-Control'] = cache_control
+ for k, v in sorted(metadata.iteritems(), key=lambda (k, _): k):
+ field = GSUtilGetMetadataField(k)
+ param = (field) if v is None else ('%s:%s' % (field, v))
+ command += ['-h', param]
+ command.extend(['cp'])
+ if gs_acl:
+ command.extend(['-a', gs_acl])
+ command.extend([source, dest])
+ return chromium_utils.RunCommand(command)
+
+
+def GSUtilCopyFile(filename, gs_base, subdir=None, mimetype=None, gs_acl=None,
+ cache_control=None, metadata=None):
+ """Copy a file to Google Storage.
+
+ Runs the following command:
+ gsutil -h Content-Type:<mimetype> \
+ -h Cache-Control:<cache_control> \
+ cp -a <gs_acl> file://<filename> <gs_base>/<subdir>/<filename w/o path>
+
+ Args:
+ filename: the file to upload
+ gs_base: the bucket to upload the file to
+    subdir: optional subdirectory within the bucket
+ mimetype: optional value to add as a Content-Type header
+ gs_acl: optional value to add as a canned-acl
+ Returns:
+ The status code returned from running the generated gsutil command.
+ """
+
+ source = 'file://' + filename
+ dest = gs_base
+ if subdir:
+ # HACK(nsylvain): We can't use normpath here because it will break the
+ # slashes on Windows.
+ if subdir == '..':
+ dest = os.path.dirname(gs_base)
+ else:
+ dest = '/'.join([gs_base, subdir])
+ dest = '/'.join([dest, os.path.basename(filename)])
+ return GSUtilCopy(source, dest, mimetype, gs_acl, cache_control,
+ metadata=metadata)
+
+
+def GSUtilCopyDir(src_dir, gs_base, dest_dir=None, gs_acl=None,
+ cache_control=None):
+ """Upload the directory and its contents to Google Storage."""
+
+ if os.path.isfile(src_dir):
+ assert os.path.isdir(src_dir), '%s must be a directory' % src_dir
+
+ gsutil = GSUtilSetup()
+ command = [gsutil, '-m']
+ if cache_control:
+ command.extend(['-h', 'Cache-Control:%s' % cache_control])
+ command.extend(['cp', '-R'])
+ if gs_acl:
+ command.extend(['-a', gs_acl])
+ if dest_dir:
+ command.extend([src_dir, gs_base + '/' + dest_dir])
+ else:
+ command.extend([src_dir, gs_base])
+ return chromium_utils.RunCommand(command)
+
+def GSUtilDownloadFile(src, dst):
+ """Copy a file from Google Storage."""
+ gsutil = GSUtilSetup()
+
+ # Run the gsutil command. gsutil internally calls command_wrapper, which
+ # will try to run the command 10 times if it fails.
+ command = [gsutil]
+ command.extend(['cp', src, dst])
+ return chromium_utils.RunCommand(command)
+
+
+def GSUtilMoveFile(source, dest, gs_acl=None):
+ """Move a file on Google Storage."""
+
+ gsutil = GSUtilSetup()
+
+ # Run the gsutil command. gsutil internally calls command_wrapper, which
+ # will try to run the command 10 times if it fails.
+ command = [gsutil]
+ command.extend(['mv', source, dest])
+ status = chromium_utils.RunCommand(command)
+
+ if status:
+ return status
+
+ if gs_acl:
+ command = [gsutil]
+ command.extend(['setacl', gs_acl, dest])
+ status = chromium_utils.RunCommand(command)
+
+ return status
+
+
+def GSUtilDeleteFile(filename):
+ """Delete a file on Google Storage."""
+
+ gsutil = GSUtilSetup()
+
+ # Run the gsutil command. gsutil internally calls command_wrapper, which
+ # will try to run the command 10 times if it fails.
+ command = [gsutil]
+ command.extend(['rm', filename])
+ return chromium_utils.RunCommand(command)
+
+
+# Python doesn't support the type of variable scope in nested methods needed
+# to avoid the global output variable. This variable should only ever be used
+# by GSUtilListBucket.
+command_output = ''
+
+
+def GSUtilListBucket(gs_base, args):
+ """List the contents of a Google Storage bucket."""
+
+ gsutil = GSUtilSetup()
+
+ # Run the gsutil command. gsutil internally calls command_wrapper, which
+ # will try to run the command 10 times if it fails.
+ global command_output
+ command_output = ''
+
+ def GatherOutput(line):
+ global command_output
+ command_output += line + '\n'
+ command = [gsutil, 'ls'] + args + [gs_base]
+ status = chromium_utils.RunCommand(command, parser_func=GatherOutput)
+ return (status, command_output)
+
+
+def LogAndRemoveFiles(temp_dir, regex_pattern):
+ """Removes files in |temp_dir| that match |regex_pattern|.
+ This function prints out the name of each directory or filename before
+ it deletes the file from disk."""
+ regex = re.compile(regex_pattern)
+ if not os.path.isdir(temp_dir):
+ return
+ for dir_item in os.listdir(temp_dir):
+ if regex.search(dir_item):
+ full_path = os.path.join(temp_dir, dir_item)
+ print 'Removing leaked temp item: %s' % full_path
+ try:
+ if os.path.islink(full_path) or os.path.isfile(full_path):
+ os.remove(full_path)
+ elif os.path.isdir(full_path):
+ chromium_utils.RemoveDirectory(full_path)
+ else:
+ print 'Temp item wasn\'t a file or directory?'
+ except OSError, e:
+ print >> sys.stderr, e
+ # Don't fail.
+
+
+def RemoveOldSnapshots(desktop):
+ """Removes ChromiumSnapshot files more than one day old. Such snapshots are
+ created when certain tests timeout (e.g., Chrome Frame integration tests)."""
+ # Compute the file prefix of a snapshot created one day ago.
+ yesterday = datetime.datetime.now() - datetime.timedelta(1)
+ old_snapshot = yesterday.strftime('ChromiumSnapshot%Y%m%d%H%M%S')
+ # Collect snapshots at least as old as that one created a day ago.
+ to_delete = []
+ for snapshot in glob.iglob(os.path.join(desktop, 'ChromiumSnapshot*.png')):
+ if os.path.basename(snapshot) < old_snapshot:
+ to_delete.append(snapshot)
+ # Delete the collected snapshots.
+ for snapshot in to_delete:
+ print 'Removing old snapshot: %s' % snapshot
+ try:
+ os.remove(snapshot)
+ except OSError, e:
+ print >> sys.stderr, e
+
+
+def RemoveChromeDesktopFiles():
+ """Removes Chrome files (i.e. shortcuts) from the desktop of the current user.
+ This does nothing if called on a non-Windows platform."""
+ if chromium_utils.IsWindows():
+ desktop_path = os.environ['USERPROFILE']
+ desktop_path = os.path.join(desktop_path, 'Desktop')
+ LogAndRemoveFiles(desktop_path, r'^(Chromium|chrome) \(.+\)?\.lnk$')
+ RemoveOldSnapshots(desktop_path)
+
+
+def RemoveJumpListFiles():
+ """Removes the files storing jump list history.
+ This does nothing if called on a non-Windows platform."""
+ if chromium_utils.IsWindows():
+ custom_destination_path = os.path.join(os.environ['USERPROFILE'],
+ 'AppData',
+ 'Roaming',
+ 'Microsoft',
+ 'Windows',
+ 'Recent',
+ 'CustomDestinations')
+ LogAndRemoveFiles(custom_destination_path, '.+')
+
+
+def RemoveTempDirContents():
+ """Obliterate the entire contents of the temporary directory, excluding
+ paths in sys.argv.
+ """
+ temp_dir = os.path.abspath(tempfile.gettempdir())
+ print 'Removing contents of %s' % temp_dir
+
+ print ' Inspecting args for files to skip'
+ whitelist = set()
+ for i in sys.argv:
+ try:
+ if '=' in i:
+ i = i.split('=')[1]
+ low = os.path.abspath(i.lower())
+ if low.startswith(temp_dir.lower()):
+ whitelist.add(low)
+ except TypeError:
+ # If the argument is too long, windows will freak out and pop a TypeError.
+ pass
+ if whitelist:
+ print ' Whitelisting:'
+ for w in whitelist:
+ print ' %r' % w
+
+ start_time = time.time()
+ for root, dirs, files in os.walk(temp_dir):
+ for f in files:
+ p = os.path.join(root, f)
+ if p.lower() not in whitelist:
+ try:
+ os.remove(p)
+ except OSError:
+ pass
+ else:
+ print ' Keeping file %r (whitelisted)' % p
+ for d in dirs[:]:
+ p = os.path.join(root, d)
+ if p.lower() not in whitelist:
+ try:
+ # TODO(iannucci): Make this deal with whitelisted items which are
+ # inside of |d|
+
+ # chromium_utils.RemoveDirectory gives access denied error when called
+ # in this loop.
+ shutil.rmtree(p, ignore_errors=True)
+ # Remove it so that os.walk() doesn't try to recurse into
+ # a non-existing directory.
+ dirs.remove(d)
+ except OSError:
+ pass
+ else:
+ print ' Keeping dir %r (whitelisted)' % p
+ print ' Removing temp contents took %.1f s' % (time.time() - start_time)
+
+
+def RemoveChromeTemporaryFiles():
+ """A large hammer to nuke what could be leaked files from unittests or
+ files left from a unittest that crashed, was killed, etc."""
+  # NOTE: print out what is cleaned up so the bots don't time out if
+  # there is a lot to clean up and also so we see the leaks in the
+  # build logs.
+ # At some point a leading dot got added, support with and without it.
+ kLogRegex = r'^\.?(com\.google\.Chrome|org\.chromium)\.'
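+  # e.g. this matches leaked temp entries named like 'com.google.Chrome.abc12'
+  # or '.org.chromium.Chromium.xyz34' (hypothetical names).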
+ if chromium_utils.IsWindows():
+ RemoveTempDirContents()
+ RemoveChromeDesktopFiles()
+ RemoveJumpListFiles()
+ elif chromium_utils.IsLinux():
+ LogAndRemoveFiles(tempfile.gettempdir(), kLogRegex)
+ LogAndRemoveFiles('/dev/shm', kLogRegex)
+ elif chromium_utils.IsMac():
+ nstempdir_path = '/usr/local/libexec/nstempdir'
+ if os.path.exists(nstempdir_path):
+ ns_temp_dir = chromium_utils.GetCommandOutput([nstempdir_path]).strip()
+ if ns_temp_dir:
+ LogAndRemoveFiles(ns_temp_dir, kLogRegex)
+ for i in ('Chromium', 'Google Chrome'):
+ # Remove dumps.
+ crash_path = '%s/Library/Application Support/%s/Crash Reports' % (
+ os.environ['HOME'], i)
+ LogAndRemoveFiles(crash_path, r'^.+\.dmp$')
+ else:
+ raise NotImplementedError(
+ 'Platform "%s" is not currently supported.' % sys.platform)
+
+
+def WriteLogLines(logname, lines, perf=None):
+ logname = logname.rstrip()
+ lines = [line.rstrip() for line in lines]
+ for line in lines:
+ print '@@@STEP_LOG_LINE@%s@%s@@@' % (logname, line)
+ if perf:
+ perf = perf.rstrip()
+ print '@@@STEP_LOG_END_PERF@%s@%s@@@' % (logname, perf)
+ else:
+ print '@@@STEP_LOG_END@%s@@@' % logname
+
+
+def ZipAndUpload(bucket, archive, *targets):
+ """Uploads a zipped archive to the specified Google Storage bucket.
+
+ Args:
+ bucket: Google Storage bucket to upload to.
+ archive: Name of the .zip archive.
+ *targets: List of targets that should be included in the archive.
+
+ Returns:
+ Path to the uploaded archive on Google Storage.
+ """
+ local_archive = os.path.join(tempfile.mkdtemp(archive), archive)
+ zip_cmd = [
+ 'zip',
+ '-9',
+ '--filesync',
+ '--recurse-paths',
+ '--symlinks',
+ local_archive,
+ ]
+ zip_cmd.extend(targets)
+
+ chromium_utils.RunCommand(zip_cmd)
+ GSUtilCopy(local_archive, 'gs://%s/%s' % (bucket, archive))
+ return 'https://storage.cloud.google.com/%s/%s' % (bucket, archive)
diff --git a/infra/scripts/legacy/scripts/slave/telemetry_utils.py b/infra/scripts/legacy/scripts/slave/telemetry_utils.py
new file mode 100755
index 0000000..e3ea592
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/telemetry_utils.py
@@ -0,0 +1,114 @@
+#! /usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+# pylint: disable=R0201
+
+"""Log parsing for telemetry tests.
+
+The TelemetryResultsProcessor loads and contains results that were output in
+JSON format from Telemetry. It can be used as a replacement for the classes in
+the performance_log_processor module.
+"""
+
+import json
+import logging
+import os
+
+from slave.performance_log_processor import _FormatHumanReadable
+
+
+class TelemetryResultsProcessor(object):
+
+ def __init__(self, filename, is_ref, cleanup_parent_dir):
+ self._chart_filename = filename
+ self._is_reference_build = is_ref
+ self._cleanup_parent_dir = cleanup_parent_dir
+
+ def ChartJson(self):
+ try:
+ return json.load(open(self._chart_filename))
+ except (IOError, ValueError):
+ logging.error('Error reading telemetry results from %s',
+ self._chart_filename)
+ logging.error('This usually means that telemetry did not run, so it could'
+ ' not generate the file. Please check the device running the test.')
+ return None
+
+ def Cleanup(self):
+ try:
+ os.remove(self._chart_filename)
+ except OSError:
+ logging.error('Unable to remove telemetry output file %s',
+ self._chart_filename)
+ if self._cleanup_parent_dir:
+ try:
+ os.rmdir(os.path.dirname(self._chart_filename))
+ except OSError:
+ logging.error('Unable to remove telemetry output dir %s',
+ os.path.dirname(self._chart_filename))
+
+ def IsChartJson(self):
+ """This is the new telemetry --chartjson output format."""
+ return True
+
+ def IsReferenceBuild(self):
+ return self._is_reference_build
+
+ def ProcessLine(self, line):
+ pass
+
+ def FailedTests(self):
+ return []
+
+ def MemoryToolReportHashes(self): # pylint: disable=R0201
+ return []
+
+ def ParsingErrors(self): # pylint: disable=R0201
+ return []
+
+ def PerformanceSummary(self):
+ """Writes the waterfall display text.
+
+ The waterfall contains lines for each important trace, in the form
+ tracename: value< (refvalue)>
+ """
+ if self._is_reference_build:
+ return []
+
+ chartjson_data = self.ChartJson()
+ if not chartjson_data:
+ return []
+
+ charts = chartjson_data.get('charts')
+ if not charts:
+ return []
+
+ def _summary_to_string(chart_name, chart_values):
+ summary = chart_values.get('summary')
+ if not summary:
+ return None
+
+ important = summary.get('important')
+ if not important:
+ return None
+
+ value_type = summary.get('type')
+ if value_type == 'list_of_scalar_values':
+ values = summary.get('values')
+ if not values or None in values:
+ return '%s: %s' % (chart_name, 'NaN')
+ else:
+ mean = sum(values) / float(len(values))
+ return '%s: %s' % (chart_name, _FormatHumanReadable(mean))
+ elif value_type == 'scalar':
+ value = summary.get('value')
+ if value is None:
+ return '%s: %s' % (chart_name, 'NaN')
+ else:
+ return '%s: %s' % (chart_name, _FormatHumanReadable(value))
+ return None
+
+ gen = (_summary_to_string(chart_name, chart_values)
+ for chart_name, chart_values in sorted(charts.iteritems()))
+ return [i for i in gen if i]
diff --git a/infra/scripts/legacy/scripts/slave/xvfb.py b/infra/scripts/legacy/scripts/slave/xvfb.py
new file mode 100644
index 0000000..f5f3577
--- /dev/null
+++ b/infra/scripts/legacy/scripts/slave/xvfb.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions to setup xvfb, which is used by the linux machines.
+"""
+
+import os
+import platform
+import signal
+import subprocess
+import tempfile
+import time
+
+def _XvfbDisplayIndex(slave_build_name):
+ return '9'
+
+def _XvfbPidFilename(slave_build_name):
+ """Returns the filename to the Xvfb pid file. This name is unique for each
+  builder. This is used by the Linux builders."""
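+  # e.g. '/tmp/xvfb-9.pid' on a typical Linux slave (the exact location comes
+  # from tempfile.gettempdir()).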
+ return os.path.join(tempfile.gettempdir(),
+ 'xvfb-' + _XvfbDisplayIndex(slave_build_name) + '.pid')
+
+
+def StartVirtualX(slave_build_name, build_dir, with_wm=True, server_dir=None):
+ """Start a virtual X server and set the DISPLAY environment variable so sub
+ processes will use the virtual X server. Also start openbox. This only works
+ on Linux and assumes that xvfb and openbox are installed.
+
+ Args:
+ slave_build_name: The name of the build that we use for the pid file.
+ E.g., webkit-rel-linux.
+ build_dir: The directory where binaries are produced. If this is non-empty,
+ we try running xdisplaycheck from |build_dir| to verify our X
+ connection.
+ with_wm: Whether we add a window manager to the display too.
+ server_dir: Directory to search for the server executable.
+ """
+ # We use a pid file to make sure we don't have any xvfb processes running
+ # from a previous test run.
+ StopVirtualX(slave_build_name)
+
+ xdisplaycheck_path = None
+ if build_dir:
+ xdisplaycheck_path = os.path.join(build_dir, 'xdisplaycheck')
+
+ display = ':%s' % _XvfbDisplayIndex(slave_build_name)
+ # Note we don't add the optional screen here (+ '.0')
+ os.environ['DISPLAY'] = display
+
+ if xdisplaycheck_path and os.path.exists(xdisplaycheck_path):
+ print 'Verifying Xvfb is not running ...'
+ checkstarttime = time.time()
+ xdisplayproc = subprocess.Popen([xdisplaycheck_path, '--noserver'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ # Wait for xdisplaycheck to exit.
+ logs = xdisplayproc.communicate()[0]
+ if xdisplayproc.returncode == 0:
+ print 'xdisplaycheck says there is a display still running, exiting...'
+ raise Exception('Display already present.')
+
+ # Figure out which X server to try.
+ cmd = 'Xvfb'
+ if server_dir and os.path.exists(server_dir):
+ cmd = os.path.join(server_dir, 'Xvfb.' + platform.architecture()[0])
+ if not os.path.exists(cmd):
+ cmd = os.path.join(server_dir, 'Xvfb')
+ if not os.path.exists(cmd):
+ print 'No Xvfb found in designated server path:', server_dir
+ raise Exception('No virtual server')
+
+ # Start a virtual X server that we run the tests in. This makes it so we can
+ # run the tests even if we didn't start the tests from an X session.
+ proc = subprocess.Popen([cmd, display, '-screen', '0', '1024x768x24', '-ac',
+ '-dpi', '96'],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ xvfb_pid_filename = _XvfbPidFilename(slave_build_name)
+ open(xvfb_pid_filename, 'w').write(str(proc.pid))
+
+ # Verify that Xvfb has started by using xdisplaycheck.
+ if xdisplaycheck_path and os.path.exists(xdisplaycheck_path):
+ print 'Verifying Xvfb has started...'
+ checkstarttime = time.time()
+ xdisplayproc = subprocess.Popen([xdisplaycheck_path],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ # Wait for xdisplaycheck to exit.
+ logs = xdisplayproc.communicate()[0]
+ checktime = time.time() - checkstarttime
+ if xdisplayproc.returncode != 0:
+ print 'xdisplaycheck failed after %d seconds.' % checktime
+ print 'xdisplaycheck output:'
+ for l in logs.splitlines():
+ print '> %s' % l
+ rc = proc.poll()
+ if rc is None:
+ print 'Xvfb still running, stopping.'
+ proc.terminate()
+ else:
+ print 'Xvfb exited, code %d' % rc
+
+ print 'Xvfb output:'
+ for l in proc.communicate()[0].splitlines():
+ print '> %s' % l
+ raise Exception(logs)
+ else:
+ print 'xdisplaycheck succeeded after %d seconds.' % checktime
+ print 'xdisplaycheck output:'
+ for l in logs.splitlines():
+ print '> %s' % l
+ print '...OK'
+
+ if with_wm:
+ # Some ChromeOS tests need a window manager.
+ subprocess.Popen('openbox', stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ print 'Window manager (openbox) started.'
+ else:
+ print 'No window manager required.'
+
+
+def StopVirtualX(slave_build_name):
+  """Tries to stop the virtual X server if one was started with StartVirtualX.
+  When the X server dies, it takes down the window manager with it.
+  If a virtual X server is not running, this method does nothing."""
+ xvfb_pid_filename = _XvfbPidFilename(slave_build_name)
+ if os.path.exists(xvfb_pid_filename):
+ xvfb_pid = int(open(xvfb_pid_filename).read())
+ print 'Stopping Xvfb with pid %d ...' % xvfb_pid
+  # If the process doesn't exist, os.kill raises an OSError, which we ignore.
+ try:
+ os.kill(xvfb_pid, signal.SIGKILL)
+ except OSError:
+ print '... killing failed, presuming unnecessary.'
+ os.remove(xvfb_pid_filename)
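+
+
+def _ExampleUsage():
+  """Illustrative sketch only; nothing on the bots calls this. The builder
+  name, build directory and test binary below are made-up placeholders."""
+  StartVirtualX('linux-rel', 'out/Release')
+  try:
+    # Run a test binary under the virtual display.
+    subprocess.call(['out/Release/browser_tests'])
+  finally:
+    StopVirtualX('linux-rel')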
+ print 'Xvfb pid file removed'
diff --git a/infra/scripts/legacy/site_config/config.py b/infra/scripts/legacy/site_config/config.py
new file mode 100644
index 0000000..7c9ba9a
--- /dev/null
+++ b/infra/scripts/legacy/site_config/config.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Declares a number of site-dependent variables for use by scripts.
+"""
+
+import os
+from twisted.spread import banana
+
+# DatabaseSetup was moved. Import it for backward compatibility
+from common.chromium_utils import DatabaseSetup # pylint: disable=W0611
+from config_bootstrap import config_private # pylint: disable=W0403,W0611
+from config_bootstrap import Master # pylint: disable=W0403,W0611
+
+SITE_CONFIG_PATH = os.path.abspath(os.path.dirname(__file__))
+
+# By default, the banana's string size limit is 640 KB, which is insufficient
+# when passing diffs around. Raise it to 100 MB. Do this here since the limit
+# is enforced on both the server and the client, so both need to raise the
+# limit.
+banana.SIZE_LIMIT = 100 * 1024 * 1024
+
+
+def SiteConfigPath():
+ return SITE_CONFIG_PATH
diff --git a/infra/scripts/legacy/site_config/config_bootstrap.py b/infra/scripts/legacy/site_config/config_bootstrap.py
new file mode 100644
index 0000000..8777cab
--- /dev/null
+++ b/infra/scripts/legacy/site_config/config_bootstrap.py
@@ -0,0 +1,128 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Site configuration information that is sufficient to configure a slave,
+without loading any buildbot or twisted code.
+"""
+
+import inspect
+import os
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+
+# Override config_default with a config_private file.
+BASE_MASTERS = []
+try:
+ import config_private # pylint: disable=F0401
+ BASE_MASTERS += [config_private.Master, config_private.PublicMaster]
+except ImportError:
+ import config_default as config_private # pylint: disable=W0403
+ BASE_MASTERS += [config_private.Master,]
+
+
+class Master(config_private.Master):
+ """Buildbot master configuration options."""
+
+ trunk_url = (config_private.Master.server_url +
+ config_private.Master.repo_root + '/trunk')
+
+ webkit_trunk_url = (config_private.Master.webkit_root_url + '/trunk')
+
+ trunk_url_src = config_private.Master.git_server_url + '/chromium/src.git'
+ trunk_url_tools = trunk_url + '/tools'
+ nacl_url = config_private.Master.nacl_trunk_url + '/src/native_client'
+ nacl_sdk_root_url = 'https://nativeclient-sdk.googlecode.com/svn'
+ nacl_ports_trunk_url = 'https://naclports.googlecode.com/svn/trunk'
+ nacl_ports_url = nacl_ports_trunk_url + '/src'
+ gears_url = 'http://gears.googlecode.com/svn/trunk'
+ gyp_trunk_url = 'http://gyp.googlecode.com/svn/trunk'
+ branch_url = (config_private.Master.server_url +
+ config_private.Master.repo_root + '/branches')
+ merge_branch_url = branch_url + '/chrome_webkit_merge_branch'
+ merge_branch_url_src = merge_branch_url + '/src'
+
+ v8_url = 'http://v8.googlecode.com/svn'
+ v8_branch_url = (v8_url + '/branches')
+ v8_bleeding_edge = v8_branch_url + '/bleeding_edge'
+ v8_trunk = v8_url + '/trunk'
+ es5conform_root_url = "https://es5conform.svn.codeplex.com/svn/"
+ es5conform_revision = 62998
+
+ dart_url = config_private.Master.googlecode_url % 'dart'
+ dart_bleeding = dart_url + '/branches/bleeding_edge'
+ dart_trunk = dart_url + '/trunk'
+
+ oilpan_url = (config_private.Master.webkit_root_url + '/branches/oilpan')
+
+ skia_url = 'http://skia.googlecode.com/svn/'
+
+ syzygy_url = 'http://sawbuck.googlecode.com/svn/'
+
+ webrtc_url = config_private.Master.googlecode_url % 'webrtc'
+ libyuv_url = 'http://libyuv.googlecode.com/svn'
+
+ # Default target platform if none was given to the factory.
+ default_platform = 'win32'
+
+ # Used by the waterfall display.
+ project_url = 'http://www.chromium.org'
+
+ # Base URL for perf test results.
+ perf_base_url = 'http://build.chromium.org/f/chromium/perf'
+
+ # Suffix for perf URL.
+ perf_report_url_suffix = 'report.html?history=150'
+
+ # Directory in which to save perf-test output data files.
+ perf_output_dir = '~/www/perf'
+
+ # URL pointing to builds and test results.
+ archive_url = 'http://build.chromium.org/buildbot'
+
+ # The test results server to upload our test results.
+ test_results_server = 'test-results.appspot.com'
+
+ # File in which to save a list of graph names.
+ perf_graph_list = 'graphs.dat'
+
+  # Magic step return code indicating "warning(s)" rather than "error".
+ retcode_warnings = 88
+
+ @staticmethod
+ def GetBotPassword():
+ """Returns the slave password retrieved from a local file, or None.
+
+ The slave password is loaded from a local file next to this module file, if
+ it exists. This is a function rather than a variable so it's not called
+ when it's not needed.
+
+    We can't both make this a property and also keep it static unless we use a
+    metaclass, which is overkill for this usage.
+ """
+    # Note: could be overridden by config_private.
+ if not getattr(Master, 'bot_password', None):
+ # If the bot_password has been requested, the file is required to exist
+      # if not overridden in config_private.
+ bot_password_path = os.path.join(BASE_DIR, '.bot_password')
+ Master.bot_password = open(bot_password_path).read().strip('\n\r')
+ return Master.bot_password
+
+ @staticmethod
+ def _extract_masters(master):
+ return [v for v in master.__dict__.itervalues()
+ if (inspect.isclass(v) and
+ issubclass(v, config_private.Master.Base) and
+ v != config_private.Master.Base)]
+
+ @classmethod
+ def get_base_masters(cls):
+ masters = []
+ for base_master in BASE_MASTERS:
+ masters += cls._extract_masters(base_master)
+ return masters
+
+ @classmethod
+ def get_all_masters(cls):
+ return cls._extract_masters(cls)
diff --git a/infra/scripts/legacy/site_config/config_default.py b/infra/scripts/legacy/site_config/config_default.py
new file mode 100644
index 0000000..8d0cb0a
--- /dev/null
+++ b/infra/scripts/legacy/site_config/config_default.py
@@ -0,0 +1,230 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Seeds a number of variables defined in chromium_config.py.
+
+The recommended way is to fork this file and use a custom DEPS forked from
+config/XXX/DEPS with the right configuration data."""
+
+import os
+import re
+import socket
+
+
+SERVICE_ACCOUNTS_PATH = '/creds/service_accounts'
+
+
+class classproperty(object):
+  """A decorator that allows is_production_host to be defined only once."""
+ def __init__(self, getter):
+ self.getter = getter
+ def __get__(self, instance, owner):
+ return self.getter(owner)
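+
+# Illustrative sketch (hypothetical class, not used anywhere below): with
+#
+#   class Example(object):
+#     label = 'demo'
+#     @classproperty
+#     def tag(cls):
+#       return 'master-' + cls.label
+#
+# Example.tag evaluates to 'master-demo' without instantiating Example; the
+# Master.Base properties below (e.g. master_port) rely on the same mechanism.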
+
+
+class Master(object):
+ # Repository URLs used by the SVNPoller and 'gclient config'.
+ server_url = 'http://src.chromium.org'
+ repo_root = '/svn'
+ git_server_url = 'https://chromium.googlesource.com'
+
+ # External repos.
+ googlecode_url = 'http://%s.googlecode.com/svn'
+ sourceforge_url = 'https://svn.code.sf.net/p/%(repo)s/code'
+ googlecode_revlinktmpl = 'https://code.google.com/p/%s/source/browse?r=%s'
+
+ # Directly fetches from anonymous Blink svn server.
+ webkit_root_url = 'http://src.chromium.org/blink'
+ nacl_trunk_url = 'http://src.chromium.org/native_client/trunk'
+
+ llvm_url = 'http://llvm.org/svn/llvm-project'
+
+ # Perf Dashboard upload URL.
+ dashboard_upload_url = 'https://chromeperf.appspot.com'
+
+ # Actually for Chromium OS slaves.
+ chromeos_url = git_server_url + '/chromiumos.git'
+
+ # Default domain for emails to come from and
+ # domains to which emails can be sent.
+ master_domain = 'example.com'
+ permitted_domains = ('example.com',)
+
+ # Your smtp server to enable mail notifications.
+ smtp = 'smtp'
+
+ # By default, bot_password will be filled in by config.GetBotPassword().
+ bot_password = None
+
+ # Fake urls to make various factories happy.
+ trunk_internal_url = None
+ trunk_internal_url_src = None
+ slave_internal_url = None
+ git_internal_server_url = None
+ syzygy_internal_url = None
+ v8_internal_url = None
+
+
+ class Base(object):
+ """Master base template.
+ Contains stubs for variables that all masters must define."""
+
+ # Base service offset for 'master_port'
+ MASTER_PORT = 2
+ # Base service offset for 'slave_port'
+ SLAVE_PORT = 3
+ # Base service offset for 'master_port_alt'
+ MASTER_PORT_ALT = 4
+ # Base service offset for 'try_job_port'
+ TRY_JOB_PORT = 5
+
+ # A BuildBucket bucket to poll.
+ buildbucket_bucket = None
+
+    # Master address. You should probably copy this file into another svn repo
+    # so you can override this value on both the slaves and the master.
+ master_host = 'localhost'
+ @classproperty
+ def current_host(cls):
+ return socket.getfqdn()
+ @classproperty
+ def in_production(cls):
+ return re.match(r'master.*\.golo\.chromium\.org', cls.current_host)
+ # Only report that we are running on a master if the master_host (even when
+ # master_host is overridden by a subclass) is the same as the current host.
+ @classproperty
+ def is_production_host(cls):
+ return cls.current_host == cls.master_host
+
+ # 'from:' field for emails sent from the server.
+ from_address = 'nobody@example.com'
+    # Additional email addresses to send gatekeeper (automatic tree closure)
+    # notifications to. Unnecessary for experimental masters and try servers.
+ tree_closing_notification_recipients = []
+
+ @classproperty
+ def master_port(cls):
+ return cls._compose_port(cls.MASTER_PORT)
+
+ @classproperty
+ def slave_port(cls):
+ # Which port slaves use to connect to the master.
+ return cls._compose_port(cls.SLAVE_PORT)
+
+ @classproperty
+ def master_port_alt(cls):
+ # The alternate read-only page. Optional.
+ return cls._compose_port(cls.MASTER_PORT_ALT)
+
+ @classproperty
+ def try_job_port(cls):
+ return cls._compose_port(cls.TRY_JOB_PORT)
+
+ @classmethod
+ def _compose_port(cls, service):
+ """Returns: The port number for 'service' from the master's static config.
+
+      Port numbers are mapped in the form:
+ XYYZZ
+ || \__The last two digits identify the master, e.g. master.chromium
+ |\____The second and third digits identify the master host, e.g.
+ | master1.golo
+ \_____The first digit identifies the port type, e.g. master_port
+
+ If any configuration is missing (incremental migration), this method will
+ return '0' for that query, indicating no port.
+ """
+ return (
+ (service * 10000) + # X
+ (cls.master_port_base * 100) + # YY
+ cls.master_port_id) # ZZ
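+
+    # _compose_port worked example (made-up numbers): a master defining
+    # master_port_base = 1 and master_port_id = 12 gets
+    # master_port = 2 * 10000 + 1 * 100 + 12 = 20112.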
+
+ service_account_file = None
+
+ @classproperty
+ def service_account_path(cls):
+ if cls.service_account_file is None:
+ return None
+ return os.path.join(SERVICE_ACCOUNTS_PATH, cls.service_account_file)
+
+ ## Per-master configs.
+
+ class Master1(Base):
+ """Chromium master."""
+ master_host = 'master1.golo.chromium.org'
+ master_port_base = 1
+ from_address = 'buildbot@chromium.org'
+ tree_closing_notification_recipients = [
+ 'chromium-build-failure@chromium-gatekeeper-sentry.appspotmail.com']
+ base_app_url = 'https://chromium-status.appspot.com'
+ tree_status_url = base_app_url + '/status'
+ store_revisions_url = base_app_url + '/revisions'
+ last_good_url = base_app_url + '/lkgr'
+ last_good_blink_url = 'http://blink-status.appspot.com/lkgr'
+
+ class Master2(Base):
+ """Legacy ChromeOS master."""
+ master_host = 'master2.golo.chromium.org'
+ master_port_base = 2
+ tree_closing_notification_recipients = [
+ 'chromeos-build-failures@google.com']
+ from_address = 'buildbot@chromium.org'
+
+ class Master2a(Base):
+    """ChromeOS master."""
+ master_host = 'master2a.golo.chromium.org'
+ master_port_base = 15
+ tree_closing_notification_recipients = [
+ 'chromeos-build-failures@google.com']
+ from_address = 'buildbot@chromium.org'
+
+ class Master3(Base):
+ """Client master."""
+ master_host = 'master3.golo.chromium.org'
+ master_port_base = 3
+ tree_closing_notification_recipients = []
+ from_address = 'buildbot@chromium.org'
+
+ class Master4(Base):
+ """Try server master."""
+ master_host = 'master4.golo.chromium.org'
+ master_port_base = 4
+ tree_closing_notification_recipients = []
+ from_address = 'tryserver@chromium.org'
+ code_review_site = 'https://codereview.chromium.org'
+
+ class Master4a(Base):
+ """Try server master."""
+ master_host = 'master4a.golo.chromium.org'
+ master_port_base = 14
+ tree_closing_notification_recipients = []
+ from_address = 'tryserver@chromium.org'
+ code_review_site = 'https://codereview.chromium.org'
+
+ ## Native Client related
+
+ class NaClBase(Master3):
+ """Base class for Native Client masters."""
+ tree_closing_notification_recipients = ['bradnelson@chromium.org']
+ base_app_url = 'https://nativeclient-status.appspot.com'
+ tree_status_url = base_app_url + '/status'
+ store_revisions_url = base_app_url + '/revisions'
+ last_good_url = base_app_url + '/lkgr'
+ perf_base_url = 'http://build.chromium.org/f/client/perf'
+
+ ## ChromiumOS related
+
+ class ChromiumOSBase(Master2):
+ """Legacy base class for ChromiumOS masters"""
+ base_app_url = 'https://chromiumos-status.appspot.com'
+ tree_status_url = base_app_url + '/status'
+ store_revisions_url = base_app_url + '/revisions'
+ last_good_url = base_app_url + '/lkgr'
+
+ class ChromiumOSBase2a(Master2a):
+ """Base class for ChromiumOS masters"""
+ base_app_url = 'https://chromiumos-status.appspot.com'
+ tree_status_url = base_app_url + '/status'
+ store_revisions_url = base_app_url + '/revisions'
+ last_good_url = base_app_url + '/lkgr'
diff --git a/infra/scripts/runtest_wrapper.py b/infra/scripts/runtest_wrapper.py
index 1311c64..825a4ab 100755
--- a/infra/scripts/runtest_wrapper.py
+++ b/infra/scripts/runtest_wrapper.py
@@ -8,24 +8,36 @@ which file gets used and test the changes on trybots before landing."""
import argparse
+import copy
import os
import subprocess
import sys
+SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+
+
def main(argv):
parser = argparse.ArgumentParser()
+ # TODO(phajdan.jr): Remove after cleaning up build repo side.
parser.add_argument(
- '--path-build', required=True, help='Path to the build repo')
+ '--path-build', help='Path to the build repo')
parser.add_argument('args', nargs='*', help='Arguments to pass to runtest.py')
args = parser.parse_args(argv)
+
+ env = copy.copy(os.environ)
+ pythonpath = env.get('PYTHONPATH', '').split(':')
+ pythonpath.append(os.path.join(
+ SRC_DIR, 'infra', 'scripts', 'legacy', 'scripts'))
+ pythonpath.append(os.path.join(
+ SRC_DIR, 'infra', 'scripts', 'legacy', 'site_config'))
+ env['PYTHONPATH'] = ':'.join(pythonpath)
+
return subprocess.call([
sys.executable,
- os.path.join(args.path_build, 'scripts', 'tools', 'runit.py'),
- '--show-path',
- sys.executable,
- os.path.join(args.path_build, 'scripts', 'slave', 'runtest.py')
- ] + args.args)
+ os.path.join(SRC_DIR, 'infra', 'scripts', 'legacy',
+ 'scripts', 'slave', 'runtest.py')
+ ] + args.args, env=env)
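+
+
+# Illustrative invocation (the arguments shown are placeholders; everything
+# after '--' is forwarded unmodified to the forked runtest.py):
+#
+#   python infra/scripts/runtest_wrapper.py -- --target Release base_unittests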
if __name__ == '__main__':