summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore2
-rw-r--r--chrome/chrome_tests.gypi11
-rw-r--r--chrome/test/data/webrtc/video_extraction.js243
-rw-r--r--chrome/test/data/webrtc/webrtc_video_quality_test.html48
-rw-r--r--chrome/test/functional/PYAUTO_TESTS1
-rwxr-xr-xchrome/test/functional/webrtc_call.py78
-rwxr-xr-xchrome/test/functional/webrtc_test_base.py80
-rwxr-xr-xchrome/test/functional/webrtc_video_quality.py434
-rw-r--r--chrome/test/functional/webrtc_write_wsh.py74
9 files changed, 904 insertions, 67 deletions
diff --git a/.gitignore b/.gitignore
index dcb1cee..ccd4557 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,6 +32,7 @@
.gdbinit
.metadata
.project
+.pydevproject
# Settings directory for eclipse
/.settings
tags
@@ -200,6 +201,7 @@ v8.log
/third_party/psyco_win32
/third_party/pthreads-win32
/third_party/pyftpdlib/src
+/third_party/pywebsocket/src
/third_party/pylib
/third_party/pymox/src
/third_party/python_24
diff --git a/chrome/chrome_tests.gypi b/chrome/chrome_tests.gypi
index a1bedff..a6d92e1 100644
--- a/chrome/chrome_tests.gypi
+++ b/chrome/chrome_tests.gypi
@@ -4496,6 +4496,17 @@
},
], # actions
}, # target 'pyautolib'
+ {
+ # Required for WebRTC PyAuto tests.
+ 'target_name': 'webrtc_test_tools',
+ 'type': 'none',
+ 'dependencies': [
+ 'pyautolib',
+ '../third_party/libjingle/libjingle.gyp:peerconnection_server',
+ '../third_party/webrtc/tools/tools.gyp:frame_analyzer',
+ '../third_party/webrtc/tools/tools.gyp:rgba_to_i420_converter',
+ ],
+ }, # target 'webrtc_test_tools'
] # targets
}],
# To enable the coverage targets, do
diff --git a/chrome/test/data/webrtc/video_extraction.js b/chrome/test/data/webrtc/video_extraction.js
new file mode 100644
index 0000000..a521d50
--- /dev/null
+++ b/chrome/test/data/webrtc/video_extraction.js
@@ -0,0 +1,243 @@
+/**
+ * Copyright (c) 2012 The Chromium Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/**
+ * The ID of the video tag from which frames are captured.
+ * @private
+ */
+var gVideoId = 'remote_view';
+
+/**
+ * Counts the number of frames that have been captured. Used in timeout
+ * adjustments.
+ * @private
+ */
+var gFrameCounter = 0;
+
+/**
+ * The gStartOfTime when the capturing begins. Used for timeout adjustments.
+ * @private
+ */
+var gStartOfTime = 0;
+
+/**
+ * The duration of the all frame capture in milliseconds.
+ * @private
+ */
+var gCaptureDuration = 0;
+
+/**
+ * The time interval at which the video is sampled.
+ * @private
+ */
+var gFrameCaptureInterval = 0;
+
+/**
+ * The global array of frames. Frames are pushed, i.e. this should be treated as
+ * a queue and we should read from the start.
+ * @private
+ */
+var gFrames = new Array();
+
+/**
+ * The WebSocket connection to the PyWebSocket server.
+ * @private
+ */
+var gWebSocket = null;
+
+/**
+ * A flag to show whether the WebSocket is open.
+ * @private
+ */
+var gWebSocketOpened = false;
+
+/**
+ * We need to skip the first two frames due to timing issues. This flag helps
+ * us determine whether or not to skip them.
+ * @private
+ */
+var gFrameIntervalAdjustment = false;
+
+/**
+ * We need this global variable to synchronize with PyAuto how long to run the
+ * call between the two peers.
+ */
+var dDoneFrameCapturing = false;
+
+/**
+ * Upon load of the window opens the WebSocket to the PyWebSocket server. The
+ * server should already be up and running.
+ */
+window.onload = function() {
+ tryOpeningWebSocket();
+}
+
+/**
+ * Starts the frame capturing.
+ *
+ * @param {Number} The width of the video/canvas area to be captured.
+ * @param {Number} The height of the video area to be captured.
+ * @param {Number} The height of the canvas where we put the video frames.
+ * @param {Number} The frame rate at which we would like to capture frames.
+ * @param {Number} The duration of the frame capture in seconds.
+ */
+function startFrameCapture(width, height, canvas_height, frame_rate, duration){
+ gFrameCaptureInterval = 1000/frame_rate;
+ gCaptureDuration = 1000 * duration;
+
+ console.log('Received width is: ' + width + ', received height is: ' + height
+ + ', capture interval is: ' + gFrameCaptureInterval +
+ ', duration is: ' + gCaptureDuration);
+ gStartOfTime = new Date().getTime();
+ setTimeout(function() { shoot(width, height, canvas_height); },
+ gFrameCaptureInterval);
+}
+
+/**
+ * Captures an image frame from the provided video element.
+ *
+ * @param {Video} video HTML5 video element from where the image frame will
+ * be captured.
+ * @param {Number} The width of the video/canvas area to be captured.
+ * @param {Number} The height of the video/canvas area to be captured.
+ *
+ * @return {Canvas}
+ */
+function capture(video, width, height) {
+ var canvas = document.getElementById('remote_canvas');
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(video, 0, 0, width, height);
+ return canvas;
+}
+
+/**
+ * The function which is called at the end of every gFrameCaptureInterval. Gets
+ * the current frame from the video and extracts the data from it. Then it saves
+ * it in the frames array and adjusts the capture interval (timers in JavaScript
+ * aren't precise).
+ *
+ * @param {Number} The width of the video/canvas area to be captured.
+ * @param {Number} The height of the video area to be captured.
+ * @param {Number} The height of the canvas where we put the video frames.
+ */
+function shoot(width, height, canvas_height){
+ // The first two captured frames have big difference between the ideal time
+ // interval between two frames and the real one. As a consequence this affects
+ // enormously the interval adjustment for subsequent frames. That's why we
+ // have to reset the time after the first two frames and get rid of these two
+ // frames.
+ if (gFrameCounter == 1 && !gFrameIntervalAdjustment) {
+ gStartOfTime = new Date().getTime();
+ gFrameCounter = 0;
+ gFrameIntervalAdjustment = true;
+ gFrames.pop();
+ gFrames.pop();
+ }
+ var video = document.getElementById(gVideoId);
+ var canvas = capture(video, width, height);
+
+ // Extract the data from the canvas.
+ var ctx = canvas.getContext('2d');
+ var img;
+ if (height == canvas_height) {
+ // We capture the whole video frame.
+ img = ctx.getImageData(0, 0, width, height);
+ } else {
+ // We capture only the barcode (canvas_height is the height of the barcode).
+ img = ctx.getImageData(0, 0, width, canvas_height);
+ }
+ gFrames.push(img.data.buffer);
+ gFrameCounter++;
+
+ // Adjust the timer.
+ var current_time = new Date().getTime();
+ var ideal_time = gFrameCounter*gFrameCaptureInterval;
+ var real_time_elapsed = current_time - gStartOfTime;
+ var diff = real_time_elapsed - ideal_time;
+
+ if (real_time_elapsed < gCaptureDuration) {
+    // If the duration isn't over yet, schedule another shot.
+ setTimeout(function() { shoot(width, height, canvas_height); },
+ gFrameCaptureInterval - diff);
+ } else { // Else reset gFrameCounter and send the frames
+ dDoneFrameCapturing = true;
+ gFrameCounter = 0;
+ sendFrames();
+ }
+}
+
+function doneFrameCapturing() {
+ if (dDoneFrameCapturing) {
+ returnToPyAuto('done-capturing');
+ } else {
+ returnToPyAuto('still-capturing');
+ }
+}
+
+/**
+ * Send the frames to the remote PyWebSocket server. Use setTimeout to regularly
+ * try to send the frames.
+ */
+function sendFrames() {
+ if (!gWebSocketOpened) {
+ console.log('WebSocket connection is not yet open');
+ setTimeout(function() { sendFrames(); }, 100);
+ }
+
+ if (gFrames.length > 0) {
+ var frame = gFrames.shift();
+ gWebSocket.send(frame);
+ gFrameCounter++;
+ setTimeout(function() { sendFrames(); }, 100);
+ } else {
+ console.log('Finished sending out frames');
+ }
+}
+
+/**
+ * Function checking whether there are more frames to send to the pywebsocket
+ * server.
+ */
+function haveMoreFramesToSend() {
+ if (gFrames.length == 0) {
+ returnToPyAuto('no-more-frames');
+ } else {
+ returnToPyAuto('still-have-frames');
+ }
+}
+
+/**
+ * Continuously tries to open a WebSocket to the pywebsocket server.
+ */
+function tryOpeningWebSocket() {
+ if (!gWebSocketOpened) {
+ console.log('Once again trying to open web socket');
+ openWebSocket();
+ setTimeout(function() { tryOpeningWebSocket(); }, 1000);
+ }
+}
+
+/**
+ * Open the WebSocket connection and register some events.
+ */
+function openWebSocket() {
+ if (!gWebSocketOpened) {
+ gWebSocket = new WebSocket('ws://localhost:12221/webrtc_write');
+ }
+
+ gWebSocket.onopen = function () {
+ console.log('Opened WebSocket connection');
+ gWebSocketOpened = true;
+ };
+
+ gWebSocket.onerror = function (error) {
+ console.log('WebSocket Error ' + error);
+ };
+
+ gWebSocket.onmessage = function (e) {
+ console.log('Server says: ' + e.data);
+ };
+}
diff --git a/chrome/test/data/webrtc/webrtc_video_quality_test.html b/chrome/test/data/webrtc/webrtc_video_quality_test.html
new file mode 100644
index 0000000..a399f17
--- /dev/null
+++ b/chrome/test/data/webrtc/webrtc_video_quality_test.html
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
+<html>
+<head>
+ <title>WebRTC PyAuto Test</title>
+ <script type="text/javascript" src="test_functions.js"></script>
+ <script type="text/javascript" src="message_handling.js"></script>
+ <script type="text/javascript" src="getusermedia.js"></script>
+ <script type="text/javascript" src="jsep_call.js"></script>
+ <script type="text/javascript" src="video_extraction.js"></script>
+</head>
+<body>
+ <table border="0">
+ <tr>
+ <td>Local Preview</td>
+ <td>Remote Video</td>
+ </tr>
+ <tr>
+ <td>
+ <video width="640" height="480" id="local_view"
+ autoplay="autoplay"></video>
+ </td>
+ <td>
+ <!-- startFrameCapture() takes 5 parameters:
+ 1. width: width of the video/canvas area.
+ 2. height: height of the video area.
+ 3. canvas_height: Height of the canvas.
+ 4. fps: fps at which we would like to sample.
+ 5. duration: The duration of the capturing. -->
+ <video width="640" height="480" id="remote_view"
+ autoplay="autoplay" onplay="startFrameCapture(640,480,480,30,5)">
+ </video>
+ </td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>
+ <div id="output" style="display: inline-block;
+ position: relative; width: 640; height: 480">
+ <!-- Canvas height should be equal to video height if we want to
+ capture the whole frames. If we only want to capture the barcode,
+ canvas height should equal the barcode height. -->
+ <canvas id="remote_canvas" width="640" height="480"></canvas>
+ </div>
+ </td>
+ </tr>
+ </table>
+</body>
+</html> \ No newline at end of file
diff --git a/chrome/test/functional/PYAUTO_TESTS b/chrome/test/functional/PYAUTO_TESTS
index 296f44b..8cf0fa7 100644
--- a/chrome/test/functional/PYAUTO_TESTS
+++ b/chrome/test/functional/PYAUTO_TESTS
@@ -717,6 +717,7 @@
'media_stream_infobar',
'webrtc_brutality_test',
'webrtc_call',
+ 'webrtc_video_quality',
],
},
diff --git a/chrome/test/functional/webrtc_call.py b/chrome/test/functional/webrtc_call.py
index 6af6466..5dc7c7e 100755
--- a/chrome/test/functional/webrtc_call.py
+++ b/chrome/test/functional/webrtc_call.py
@@ -11,10 +11,6 @@ import pyauto
import webrtc_test_base
-class MissingRequiredBinaryException(Exception):
- pass
-
-
class WebrtcCallTest(webrtc_test_base.WebrtcTestBase):
"""Test we can set up a WebRTC call and disconnect it.
@@ -29,29 +25,12 @@ class WebrtcCallTest(webrtc_test_base.WebrtcTestBase):
trunk/talk/examples/peerconnection/server).
"""
- def ExtraChromeFlags(self):
- """Adds flags to the Chrome command line."""
- extra_flags = ['--enable-media-stream', '--enable-peer-connection']
- return pyauto.PyUITest.ExtraChromeFlags(self) + extra_flags
-
def setUp(self):
pyauto.PyUITest.setUp(self)
-
- # Start the peerconnection_server. This must be built before running the
- # test, and we assume the binary ends up next to the Chrome binary.
- binary_path = os.path.join(self.BrowserPath(), 'peerconnection_server')
- if self.IsWin():
- binary_path += '.exe'
- if not os.path.exists(binary_path):
- raise MissingRequiredBinaryException(
- 'Could not locate peerconnection_server. Have you built the '
- 'peerconnection_server target? We expect to have a '
- 'peerconnection_server binary next to the chrome binary.')
-
- self._server_process = subprocess.Popen(binary_path)
+ self.StartPeerConnectionServer()
def tearDown(self):
- self._server_process.kill()
+ self.StopPeerConnectionServer()
pyauto.PyUITest.tearDown(self)
self.assertEquals('', self.CheckErrorsAndCrashes())
@@ -84,21 +63,21 @@ class WebrtcCallTest(webrtc_test_base.WebrtcTestBase):
self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=0))
self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=1))
- self._Connect('user_1', tab_index=0)
- self._Connect('user_2', tab_index=1)
+ self.Connect('user_1', tab_index=0)
+ self.Connect('user_2', tab_index=1)
- self._EstablishCall(from_tab_with_index=0)
+ self.EstablishCall(from_tab_with_index=0)
self._StartDetectingVideo(tab_index=0, video_element='remote_view')
self._WaitForVideoToPlay()
# The hang-up will automatically propagate to the second tab.
- self._HangUp(from_tab_with_index=0)
- self._WaitUntilHangUpVerified(tab_index=1)
+ self.HangUp(from_tab_with_index=0)
+ self.VerifyHungUp(tab_index=1)
- self._Disconnect(tab_index=0)
- self._Disconnect(tab_index=1)
+ self.Disconnect(tab_index=0)
+ self.Disconnect(tab_index=1)
# Ensure we didn't miss any errors.
self.AssertNoFailures(tab_index=0)
@@ -131,49 +110,16 @@ class WebrtcCallTest(webrtc_test_base.WebrtcTestBase):
self.GetUserMedia(tab_index=0)
self.GetUserMedia(tab_index=1)
- self._Connect("user_1", tab_index=0)
- self._Connect("user_2", tab_index=1)
+ self.Connect("user_1", tab_index=0)
+ self.Connect("user_2", tab_index=1)
- self._EstablishCall(from_tab_with_index=0)
+ self.EstablishCall(from_tab_with_index=0)
self.assertEquals('failed-with-error-1',
self.GetUserMedia(tab_index=0, action='deny'))
self.assertEquals('failed-with-error-1',
self.GetUserMedia(tab_index=0, action='dismiss'))
- def _Connect(self, user_name, tab_index):
- self.assertEquals('ok-connected', self.ExecuteJavascript(
- 'connect("http://localhost:8888", "%s")' % user_name,
- tab_index=tab_index))
- self.AssertNoFailures(tab_index)
-
- def _EstablishCall(self, from_tab_with_index):
- self.assertEquals('ok-call-established', self.ExecuteJavascript(
- 'call()', tab_index=from_tab_with_index))
- self.AssertNoFailures(from_tab_with_index)
-
- # Double-check the call reached the other side.
- self.assertEquals('yes', self.ExecuteJavascript(
- 'is_call_active()', tab_index=from_tab_with_index))
-
- def _HangUp(self, from_tab_with_index):
- self.assertEquals('ok-call-hung-up', self.ExecuteJavascript(
- 'hangUp()', tab_index=from_tab_with_index))
- self._WaitUntilHangUpVerified(tab_index=from_tab_with_index)
- self.AssertNoFailures(tab_index=from_tab_with_index)
-
- def _WaitUntilHangUpVerified(self, tab_index):
- hung_up = self.WaitUntil(
- function=lambda: self.ExecuteJavascript('is_call_active()',
- tab_index=tab_index),
- expect_retval='no')
- self.assertTrue(hung_up,
- msg='Timed out while waiting for hang-up to be confirmed.')
-
- def _Disconnect(self, tab_index):
- self.assertEquals('ok-disconnected', self.ExecuteJavascript(
- 'disconnect()', tab_index=tab_index))
-
def _StartDetectingVideo(self, tab_index, video_element):
self.assertEquals('ok-started', self.ExecuteJavascript(
'startDetection("%s", "frame_buffer", 320, 240)' % video_element,
diff --git a/chrome/test/functional/webrtc_test_base.py b/chrome/test/functional/webrtc_test_base.py
index 999c821..202e3061 100755
--- a/chrome/test/functional/webrtc_test_base.py
+++ b/chrome/test/functional/webrtc_test_base.py
@@ -3,11 +3,23 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import os
+import subprocess
+
import pyauto
+class MissingRequiredBinaryException(Exception):
+ pass
+
+
class WebrtcTestBase(pyauto.PyUITest):
- """This base class provides helpers for getUserMedia calls."""
+ """This base class provides helpers for WebRTC calls."""
+
+ def ExtraChromeFlags(self):
+ """Adds flags to the Chrome command line."""
+ extra_flags = ['--enable-media-stream', '--enable-peer-connection']
+ return pyauto.PyUITest.ExtraChromeFlags(self) + extra_flags
def GetUserMedia(self, tab_index, action='allow'):
"""Acquires webcam or mic for one tab and returns the result.
@@ -64,3 +76,69 @@ class WebrtcTestBase(pyauto.PyUITest):
"""
self.assertEquals('ok-no-errors', self.ExecuteJavascript(
'getAnyTestFailures()', tab_index=tab_index))
+
+ def Connect(self, user_name, tab_index):
+ self.assertEquals('ok-connected', self.ExecuteJavascript(
+ 'connect("http://localhost:8888", "%s")' % user_name,
+ tab_index=tab_index))
+ self.AssertNoFailures(tab_index)
+
+ def EstablishCall(self, from_tab_with_index):
+ self.assertEquals('ok-call-established', self.ExecuteJavascript(
+ 'call()', tab_index=from_tab_with_index))
+ self.AssertNoFailures(from_tab_with_index)
+
+ # Double-check the call reached the other side.
+ self.assertEquals('yes', self.ExecuteJavascript(
+ 'is_call_active()', tab_index=from_tab_with_index))
+
+ def HangUp(self, from_tab_with_index):
+ self.assertEquals('ok-call-hung-up', self.ExecuteJavascript(
+ 'hangUp()', tab_index=from_tab_with_index))
+ self.VerifyHungUp(from_tab_with_index)
+ self.AssertNoFailures(from_tab_with_index)
+
+ def VerifyHungUp(self, tab_index):
+ self.assertEquals('no', self.ExecuteJavascript(
+ 'is_call_active()', tab_index=tab_index))
+
+ def Disconnect(self, tab_index):
+ self.assertEquals('ok-disconnected', self.ExecuteJavascript(
+ 'disconnect()', tab_index=tab_index))
+
+ def BinPathForPlatform(self, path):
+ """Form a platform specific path to a binary.
+
+ Args:
+ path(string): The path to the binary without an extension.
+ Return:
+ (string): The platform-specific bin path.
+ """
+ if self.IsWin():
+ path += '.exe'
+ return path
+
+ def StartPeerConnectionServer(self):
+ """Starts peerconnection_server.
+
+ Peerconnection_server is a custom binary allowing two WebRTC clients to find
+ each other. For more details, see the source code which is available at the
+ site http://code.google.com/p/libjingle/source/browse/ (make sure to browse
+ to trunk/talk/examples/peerconnection/server).
+ """
+ # Start the peerconnection_server. It should be next to chrome.
+ binary_path = os.path.join(self.BrowserPath(), 'peerconnection_server')
+ binary_path = self.BinPathForPlatform(binary_path)
+
+ if not os.path.exists(binary_path):
+ raise MissingRequiredBinaryException(
+ 'Could not locate peerconnection_server. Have you built the '
+ 'peerconnection_server target? We expect to have a '
+ 'peerconnection_server binary next to the chrome binary.')
+
+ self._server_process = subprocess.Popen(binary_path)
+
+ def StopPeerConnectionServer(self):
+ """Stops the peerconnection_server."""
+ assert self._server_process
+ self._server_process.kill() \ No newline at end of file
diff --git a/chrome/test/functional/webrtc_video_quality.py b/chrome/test/functional/webrtc_video_quality.py
new file mode 100755
index 0000000..5fcb406
--- /dev/null
+++ b/chrome/test/functional/webrtc_video_quality.py
@@ -0,0 +1,434 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+
+import pyauto_functional
+import pyauto
+import pyauto_utils
+import webrtc_test_base
+
+# If you change the port number, don't forget to modify video_extraction.js too.
+_PYWEBSOCKET_PORT_NUMBER = '12221'
+
+
+class MissingRequiredToolException(Exception):
+ pass
+
+
+class WebrtcVideoQualityTest(webrtc_test_base.WebrtcTestBase):
+ """Test the video quality of the WebRTC output.
+
+ Prerequisites: This test case must run on a machine with a webcam, either
+ fake or real, and with some kind of audio device. You must make the
+ peerconnection_server target before you run.
+
+ The test case will launch a custom binary (peerconnection_server) which will
+ allow two WebRTC clients to find each other.
+
+ The test also runs several other custom binaries - rgba_to_i420 converter and
+  frame_analyzer. Both tools can be found under third_party/webrtc/tools. The
+ test also runs a standalone Python implementation of a WebSocket server
+ (pywebsocket) and a barcode_decoder script.
+ """
+
+ def setUp(self):
+ pyauto.PyUITest.setUp(self)
+ self.StartPeerConnectionServer()
+
+ def tearDown(self):
+ self.StopPeerConnectionServer()
+
+ pyauto.PyUITest.tearDown(self)
+ self.assertEquals('', self.CheckErrorsAndCrashes())
+
+ def _WebRtcCallWithHelperPage(self, test_page, helper_page):
+
+ """Tests we can call, let run for some time and hang up with WebRTC.
+
+ This test exercises pretty much the whole happy-case for the WebRTC
+ JavaScript API. Currently, it exercises a normal call setup using the API
+ defined at http://dev.w3.org/2011/webrtc/editor/webrtc.html. The API is
+ still evolving.
+
+ The test will load the supplied HTML file, which in turn will load different
+ javascript files depending on which version of the signaling protocol
+ we are running.
+ The supplied HTML files will be loaded in two tabs and tell the web
+ pages to start up WebRTC, which will acquire video and audio devices on the
+ system. This will launch a dialog in Chrome which we click past using the
+ automation controller. Then, we will order both tabs to connect the server,
+ which will make the two tabs aware of each other. Once that is done we order
+ one tab to call the other.
+
+ We make sure that the javascript tells us that the call succeeded, lets it
+ run for some time and try to hang up the call after that. While the call is
+ running, we capture frames with the help of the functions in the
+ video_extraction.js file.
+
+ Args:
+ test_page(string): The name of the test HTML page. It is looked for in the
+ webrtc directory under chrome/test/data.
+ helper_page(string): The name of the helper HTML page. It is looked for in
+ the same directory as the test_page.
+ """
+ assert helper_page
+ url = self.GetFileURLForDataPath('webrtc', test_page)
+ helper_url = self.GetFileURLForDataPath('webrtc', helper_page)
+
+ # Start the helper page in the first tab
+ self.NavigateToURL(helper_url)
+
+ # Start the test page in the second page.
+ self.AppendTab(pyauto.GURL(url))
+
+ self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=0))
+ self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=1))
+ self.Connect('user_1', tab_index=0)
+ self.Connect('user_2', tab_index=1)
+
+ self.EstablishCall(from_tab_with_index=0)
+
+ # Wait for JavaScript to capture all the frames. In the HTML file we specify
+ # how many seconds to capture frames. The default for retry_sleep is 0.25
+ # but there is no need to ask this often whether we are done capturing the
+  # frames. It seems that 1 second is a more reasonable retry time.
+ done_capturing = self.WaitUntil(
+ function=lambda: self.ExecuteJavascript('doneFrameCapturing()',
+ tab_index=1),
+ expect_retval='done-capturing', retry_sleep=1.0)
+
+ self.assertTrue(done_capturing,
+ msg='Timed out while waiting frames to be captured.')
+
+ # The hang-up will automatically propagate to the second tab.
+ self.HangUp(from_tab_with_index=0)
+ self.VerifyHungUp(tab_index=1)
+
+ self.Disconnect(tab_index=0)
+ self.Disconnect(tab_index=1)
+
+ # Ensure we didn't miss any errors.
+ self.AssertNoFailures(tab_index=0)
+ self.AssertNoFailures(tab_index=1)
+
+ def testVgaVideoQuality(self):
+ """Tests the WebRTC video output for a VGA video input.
+
+ On the bots we will be running fake webcam driver and we will feed a video
+ with overlaid barcodes. In order to run the analysis on the output, we need
+ to use the original input video as a reference video. We take the name of
+ this file from an environment variable that the bots set.
+ """
+ ref_file = os.environ['PYAUTO_REFERENCE_FILE']
+ self._StartVideoQualityTest(test_page='webrtc_video_quality_test.html',
+ helper_page='webrtc_jsep_test.html',
+ reference_yuv=ref_file, width=640,
+ height=480, barcode_height=32)
+
+
+ def _StartVideoQualityTest(self, reference_yuv,
+ test_page='webrtc_video_quality_test.html',
+ helper_page='webrtc_jsep_test.html',
+ width=640, height=480, barcode_height=32):
+ """Captures video output into a canvas and sends it to a server.
+
+ This test captures the output frames of a WebRTC connection to a canvas and
+ later sends them over WebSocket to a WebSocket server implemented in Python.
+ At the server side we can store the frames for subsequent quality analysis.
+
+ After the frames are sent to the pywebsocket server, we run the RGBA to I420
+ converter, the barcode decoder and finally the frame analyzer. We also print
+ everything to the Perf Graph for visualization
+
+ Args:
+ reference_yuv(string): The name of the reference YUV video that will be
+ used in the analysis.
+ test_page(string): The name of the test HTML page. To be looked for in the
+ webrtc directory under chrome/test/data.
+ helper_page(string): The name of the HTML helper page. To be looked for in
+ the same directory as the test_page.
+ width(int): The width of the test video frames.
+ height(int): The height of the test video frames.
+ barcode_height(int): The height of the barcodes overlaid in every frame of
+ the video.
+ """
+ self._StartPywebsocketServer()
+
+ self._WebRtcCallWithHelperPage(test_page, helper_page)
+
+ # Wait for JavaScript to send all the frames to the server. The test will
+ # have quite a lot of frames to send, so it will take at least several
+ # seconds. Thus there is no need to ask whether there are more frames to
+ # send every fourth of a second.
+ no_more_frames = self.WaitUntil(
+ function=lambda: self.ExecuteJavascript('haveMoreFramesToSend()',
+ tab_index=1),
+ expect_retval='no-more-frames', retry_sleep=0.5, timeout=150)
+ self.assertTrue(no_more_frames,
+ msg='Timed out while waiting for frames to send.')
+
+ self._StopPywebsocketServer()
+
+ self.assertTrue(self._RunRGBAToI420Converter(width, height))
+ self.assertTrue(self._RunBarcodeDecoder(width, height, barcode_height))
+
+ analysis_result = self._RunFrameAnalyzer(width, height, reference_yuv)
+ self._ProcessPsnrAndSsimOutput(analysis_result)
+ self._ProcessFramesCountOutput(analysis_result)
+
+ def _StartPywebsocketServer(self):
+ """Starts the pywebsocket server."""
+ print 'Starting pywebsocket server.'
+ path_to_base = os.path.join(self.BrowserPath(), '..', '..')
+
+ # Pywebsocket source directory.
+ path_pyws_dir = os.path.join(path_to_base, 'third_party', 'WebKit',
+ 'Tools', 'Scripts', 'webkitpy', 'thirdparty')
+
+ # Pywebsocket standalone server.
+ path_to_pywebsocket= os.path.join(path_pyws_dir, 'mod_pywebsocket',
+ 'standalone.py')
+
+ # Path to the data handler to handle data received by the server.
+ path_to_handler = os.path.join(path_to_base, 'chrome', 'test', 'functional')
+
+ # The python interpreter binary.
+ python_interp = sys.executable
+
+ # The pywebsocket start command - we could add --log-level=debug for debug.
+ # -p stands for port, -d stands for root_directory (where the data handlers
+ # are).
+ start_cmd = [python_interp, path_to_pywebsocket,
+ '-p', _PYWEBSOCKET_PORT_NUMBER,
+ '-d', path_to_handler,]
+ env = os.environ
+ # Set PYTHONPATH to include the pywebsocket base directory.
+ env['PYTHONPATH'] = (path_pyws_dir + os.path.pathsep +
+ env.get('PYTHONPATH', ''))
+ handler_output_path = os.path.join(path_to_base, '..', '..')
+ env['PYWS_DIR_FOR_HANDLER_OUTPUT'] = os.path.abspath(handler_output_path)
+
+ # Start the pywebsocket server. The server will not start instantly, so the
+ # code opening websockets to it should take this into account.
+ self._pywebsocket_server = subprocess.Popen(start_cmd, env=env)
+
+ def _StopPywebsocketServer(self):
+ """Stops the running instance of pywebsocket server."""
+ print 'Stopping pywebsocket server.'
+ assert self._pywebsocket_server
+ self._pywebsocket_server.kill()
+
+ def _RunRGBAToI420Converter(self, width, height):
+ """Runs the RGBA to I420 converter.
+
+    The rgba_to_i420_converter is part of the webrtc_test_tools target which
+    should be built prior to running this test. The resulting binary should live
+ next to Chrome.
+
+ Args:
+ width(int): The width of the frames to be converted and stitched together.
+ height(int): The height of the frames to be converted and stitched.
+ Return:
+ (bool): True if the conversion is successful, false otherwise.
+ """
+ path_to_rgba_converter = os.path.join(self.BrowserPath(),
+ 'rgba_to_i420_converter')
+ path_to_rgba_converter = os.path.abspath(path_to_rgba_converter)
+ path_to_rgba_converter = self.BinPathForPlatform(path_to_rgba_converter)
+
+ if not os.path.exists(path_to_rgba_converter):
+ raise webrtc_test_base.MissingRequiredBinaryException(
+ 'Could not locate rgba_to_i420_converter! Did you build the '
+ 'webrtc_pyauto_tools target?')
+
+ # This is where the pywebsocket handler writes the captured frames.
+ frames_dir = os.environ['PYWS_DIR_FOR_HANDLER_OUTPUT']
+
+ # We produce an output file that will later be used as an input to the
+ # barcode decoder and frame analyzer tools.
+ output_file = os.path.join(frames_dir, 'pyauto_output.yuv')
+ start_cmd = [path_to_rgba_converter, '--frames_dir=%s' % frames_dir,
+ '--output_file=%s' % output_file, '--width=%d' % width,
+ '--height=%d' % height, '--delete_frames']
+ print 'Start command: ', ' '.join(start_cmd)
+ rgba_converter = subprocess.Popen(start_cmd, stdout=subprocess.PIPE)
+ output = rgba_converter.communicate()[0]
+ if 'Unsuccessful' in output:
+ print 'Unsuccessful RGBA to I420 conversion'
+ return False
+ return True
+
+ def _RunBarcodeDecoder(self, width, height, barcode_height):
+ """Runs the barcode decoder.
+
+ The barcode decoder decodes the barcode overlaid into every frame of the
+ YUV file produced by rgba_to_i420_converter. It writes the relation between
+ the frame number in this output file and the frame number (the decoded
+ barcode value) in the original input file.
+
+ The barcode decoder uses a big java library for decoding the barcodes called
+ Zxing (Zebra crossing). It is checked in the webrtc-tools repo which isn't
+ synced in Chrome. That's why we check it out next to Chrome using a
+    modified version of the .gclient file and then build the necessary jars for
+ the library to be usable.
+
+ Args:
+ width(int): The frames width of the video to be decoded.
+ height(int): The frames height of the video to be decoded.
+ barcode_height(int): The height of the barcodes overlaid on top of every
+ frame.
+ Return:
+ (bool): True if the decoding was successful, False otherwise.
+ """
+ # The barcode decoder lives in folder barcode_tools next to src.
+ path_to_decoder = os.path.join(self.BrowserPath(), '..', '..', '..',
+ 'barcode_tools', 'barcode_decoder.py')
+ path_to_decoder = os.path.abspath(path_to_decoder)
+
+ if not os.path.exists(path_to_decoder):
+ raise MissingRequiredToolException(
+ 'Could not locate the barcode decoder tool! The barcode decoder '
+ 'decodes the barcodes overlaid on top of every frame of the captured '
+ 'video. As it uses a big Java library, it is checked in the '
+ 'webrtc-tools repo which isn\'t synced in Chrome. Check it out next '
+ 'to Chrome\' src by modifying the .gclient file. Than build the '
+ 'necessary jar files for the library to become usable. You can build '
+ 'them by running the build_zxing.py script.')
+ # The YUV file is the file produced by rgba_to_i420_converter.
+ yuv_file = os.path.join(os.environ['PYWS_DIR_FOR_HANDLER_OUTPUT'],
+ 'pyauto_output.yuv')
+ stats_file = os.path.join(os.environ['PYWS_DIR_FOR_HANDLER_OUTPUT'],
+ 'pyauto_stats.txt')
+ python_interp = sys.executable
+ start_cmd = [python_interp, path_to_decoder, '--yuv_file=%s' % yuv_file,
+ '--yuv_frame_width=%d' % width,
+ '--yuv_frame_height=%d' % height,
+ '--barcode_height=%d' % barcode_height,
+ '--stats_file=%s' % stats_file]
+ print 'Start command: ', ' '.join(start_cmd)
+
+ barcode_decoder = subprocess.Popen(start_cmd, stderr=subprocess.PIPE)
+ error = barcode_decoder.communicate()[1]
+ if error:
+ print 'Error: ', error
+ return False
+ return True
+
+ def _RunFrameAnalyzer(self, width, height, reference_yuv):
+ """Runs the frame analyzer tool for PSNR and SSIM analysis.
+
+ The frame analyzer is also part of the webrtc_pyauto_target. It should be
+ built before running this test. We assume that the binary will end up next
+ to Chrome.
+
+ Frame analyzer prints its output to the standard output from where it has to
+ be read and processed.
+
+ Args:
+ width(int): The width of the video frames to be analyzed.
+ height(int): The height of the video frames to be analyzed.
+ reference_yuv(string): The name of the reference video to be used as a
+ reference during the analysis.
+ Return:
+ (string): The output from the frame_analyzer.
+ """
+ path_to_analyzer = os.path.join(self.BrowserPath(), 'frame_analyzer')
+ path_to_analyzer = os.path.abspath(path_to_analyzer)
+
+ path_to_analyzer = self.BinPathForPlatform(path_to_analyzer)
+
+ if not os.path.exists(path_to_analyzer):
+ raise webrtc_test_base.MissingRequiredBinaryException(
+ 'Could not locate frame_analyzer! Did you build the '
+ 'webrtc_pyauto_tools target?')
+
+ # We assume that the reference file(s) will be in the same directory where
+ # the captured frames, the output and stats files are written.
+ ref_file = os.path.join(os.environ['PYWS_DIR_FOR_HANDLER_OUTPUT'],
+ reference_yuv)
+ test_file = os.path.join(os.environ['PYWS_DIR_FOR_HANDLER_OUTPUT'],
+ 'pyauto_output.yuv')
+ stats_file = os.path.join(os.environ['PYWS_DIR_FOR_HANDLER_OUTPUT'],
+ 'pyauto_stats.txt')
+ start_cmd = [path_to_analyzer, '--reference_file=%s' % ref_file,
+ '--test_file=%s' % test_file, '--stats_file=%s' % stats_file,
+ '--width=%d' % width, '--height=%d' % height]
+ print 'Start command: ', ' '.join(start_cmd)
+
+ frame_analyzer = subprocess.Popen(start_cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ output, error = frame_analyzer.communicate()
+ if error:
+ print 'Error: ', error
+ return 'BSTATS undef undef; ESTATS'
+ return output
+
+ def _ProcessFramesCountOutput(self, output):
+ """Processes the analyzer output for the different frame counts.
+
+ The frame analyzer outputs additional information about the number of unique
+ frames captured, The max number of repeated frames in a sequence and the
+ max number of skipped frames. These values are then written to the Perf
+ Graph. (Note: Some of the repeated or skipped frames will probably be due to
+ the imperfection of JavaScript timers.)
+
+ Args:
+ output(string): The output from the frame analyzer to be processed.
+ """
+ # The output from frame analyzer will be in the format:
+ # <PSNR and SSIM stats>
+ # Unique_frames_count:<value>
+ # Max_repeated:<value>
+ # Max_skipped:<value>
+ unique_fr_pos = output.rfind('Unique_frames_count')
+ result_str = output[unique_fr_pos:]
+
+ result_list = result_str.split()
+
+ for result in result_list:
+ colon_pos = result.find(':')
+ key = result[:colon_pos]
+ value = result[colon_pos+1:]
+ pyauto_utils.PrintPerfResult(key, 'VGA', value, '')
+
+ def _ProcessPsnrAndSsimOutput(self, output):
+ """Processes the analyzer output to extract the PSNR and SSIM values.
+
+ The frame analyzer produces PSNR and SSIM results for every unique frame
+ that has been captured. This method forms a list of all the psnr and ssim
+ values and passes it to PrintPerfResult() for printing on the Perf Graph.
+
+ Args:
+ output(string): The output from the frame analyzer to be processed.
+ """
+ # The output is in the format:
+ # BSTATS
+ # psnr ssim; psnr ssim; ... psnr ssim;
+ # ESTATS
+ stats_beginning = output.find('BSTATS') # Get the beginning of the stats
+ stats_ending = output.find('ESTATS') # Get the end of the stats
+ stats_str = output[(stats_beginning + len('BSTATS')):stats_ending]
+
+ stats_list = stats_str.split(';')
+
+ psnr = []
+ ssim = []
+
+ for item in stats_list:
+ item = item.strip()
+ if item != '':
+ entry = item.split(' ')
+ psnr.append(float(entry[0]))
+ ssim.append(float(entry[1]))
+
+ pyauto_utils.PrintPerfResult('PSNR', 'VGA', psnr, '')
+ pyauto_utils.PrintPerfResult('SSIM', 'VGA', ssim, '')
+
+
if __name__ == '__main__':
  # Hand control to the PyAuto harness, which discovers and runs the test
  # cases defined in this module.
  pyauto_functional.Main()
diff --git a/chrome/test/functional/webrtc_write_wsh.py b/chrome/test/functional/webrtc_write_wsh.py
new file mode 100644
index 0000000..460c477
--- /dev/null
+++ b/chrome/test/functional/webrtc_write_wsh.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# This module is handler for incoming data to the pywebsocket standalone server
+# (source is in http://code.google.com/p/pywebsocket/source/browse/trunk/src/).
+# It follows the conventions of the pywebsocket server and in our case receives
+# and stores incoming frames to disk.
+
+import Queue
+import os
+import threading
+
+_NUMBER_OF_WRITER_THREADS = 10
+
+# I couldn't think of another way to handle this than through a global variable
+g_frame_number_counter = 0
+g_frames_queue = Queue.Queue()
+
+
def web_socket_do_extra_handshake(request):
  """Part of the pywebsocket handler interface. Accepts every connection."""
  return  # No extra handshake validation is performed.
+
+
def web_socket_transfer_data(request):
  """Part of the pywebsocket handler interface. Receives and queues frames.

  Blocks until the connection is closed (receive_message() returns None).
  Every message is assumed to be binary frame data; each one is enqueued
  together with a monotonically increasing frame number.
  """
  global g_frame_number_counter
  while True:
    message = request.ws_stream.receive_message()
    if message is None:
      return
    current_frame_number = str(g_frame_number_counter)
    g_frame_number_counter += 1
    g_frames_queue.put((current_frame_number, message))
+
+
class FrameWriterThread(threading.Thread):
  """Writes received frames to disk.

  The frames are named in the format frame_xxxx, where xxxx is the 0-padded
  frame number. The frames and their numbers are obtained from a synchronized
  queue. The frames are written in the directory specified in the environment
  variable PYWS_DIR_FOR_HANDLER_OUTPUT.
  """
  def __init__(self, queue):
    # queue: synchronized queue yielding (frame_number_string, frame_data).
    threading.Thread.__init__(self)
    self.queue = queue

  def run(self):
    # Runs forever; the thread is started as a daemon so it dies with the
    # server process.
    while True:
      frame_number, frame_data = self.queue.get()
      self._WriteFrameToDisk(frame_number, frame_data)
      self.queue.task_done()

  def _WriteFrameToDisk(self, frame_number, frame_data):
    # Writes one frame as frame_<0-padded number> in the output directory.
    file_name = 'frame_' + frame_number.zfill(4)
    file_name = os.path.join(os.environ['PYWS_DIR_FOR_HANDLER_OUTPUT'],
                             file_name)
    # The context manager guarantees the file handle is closed even if the
    # write fails, unlike the previous bare open()/close() pair.
    with open(file_name, 'wb') as frame_file:
      frame_file.write(frame_data)
+
+
def start_threads():
  """Spawns the pool of writer threads, then blocks until the queue drains."""
  for _ in range(_NUMBER_OF_WRITER_THREADS):
    writer = FrameWriterThread(g_frames_queue)
    writer.setDaemon(True)  # Don't keep the server process alive on exit.
    writer.start()
  g_frames_queue.join()
+
+
+# This handler's entire code is imported as 'it is' and then incorporated in the
+# code of the standalone pywebsocket server. If we put this start_threads() call
+# inside a if __name__ == '__main__' clause it wouldn't run this code at all
+# (tested).
+start_threads() \ No newline at end of file