summaryrefslogtreecommitdiffstats
path: root/content/renderer
diff options
context:
space:
mode:
authorjam@chromium.org <jam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-03-16 20:40:37 +0000
committerjam@chromium.org <jam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-03-16 20:40:37 +0000
commitccc70d8efc4981925423ffa1390c0b4783ea1add (patch)
tree8cfe460075607074359b7f5556705c0ff173e7be /content/renderer
parent9dc1379851640ce5bd544862ce86f44258159ca9 (diff)
downloadchromium_src-ccc70d8efc4981925423ffa1390c0b4783ea1add.zip
chromium_src-ccc70d8efc4981925423ffa1390c0b4783ea1add.tar.gz
chromium_src-ccc70d8efc4981925423ffa1390c0b4783ea1add.tar.bz2
Move core renderer subdirectories to content.
TBR=avi Review URL: http://codereview.chromium.org/6673090 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@78422 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content/renderer')
-rw-r--r--content/renderer/ggl.cc605
-rw-r--r--content/renderer/ggl.h175
-rw-r--r--content/renderer/media/audio_renderer_impl.cc362
-rw-r--r--content/renderer/media/audio_renderer_impl.h157
-rw-r--r--content/renderer/media/audio_renderer_impl_unittest.cc146
-rw-r--r--content/renderer/media/gles2_video_decode_context.cc122
-rw-r--r--content/renderer/media/gles2_video_decode_context.h113
-rw-r--r--content/renderer/media/ipc_video_decoder.cc207
-rw-r--r--content/renderer/media/ipc_video_decoder.h92
-rw-r--r--content/renderer/p2p/ipc_network_manager.cc25
-rw-r--r--content/renderer/p2p/ipc_network_manager.h32
-rw-r--r--content/renderer/p2p/ipc_socket_factory.cc323
-rw-r--r--content/renderer/p2p/ipc_socket_factory.h43
-rw-r--r--content/renderer/p2p/socket_client.cc124
-rw-r--r--content/renderer/p2p/socket_client.h102
-rw-r--r--content/renderer/p2p/socket_dispatcher.cc85
-rw-r--r--content/renderer/p2p/socket_dispatcher.h71
17 files changed, 2784 insertions, 0 deletions
diff --git a/content/renderer/ggl.cc b/content/renderer/ggl.cc
new file mode 100644
index 0000000..e73f50d
--- /dev/null
+++ b/content/renderer/ggl.cc
@@ -0,0 +1,605 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/ggl.h"
+
+#include "base/lazy_instance.h"
+#include "base/ref_counted.h"
+#include "base/weak_ptr.h"
+#include "chrome/renderer/command_buffer_proxy.h"
+#include "chrome/renderer/gpu_channel_host.h"
+#include "chrome/renderer/gpu_video_service_host.h"
+#include "chrome/renderer/render_widget.h"
+#include "content/renderer/media/gles2_video_decode_context.h"
+#include "ipc/ipc_channel_handle.h"
+
+#if defined(ENABLE_GPU)
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/gles2_lib.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/common/gpu_trace_event.h"
+#include "gpu/GLES2/gles2_command_buffer.h"
+#endif // ENABLE_GPU
+
+namespace ggl {
+
+#if defined(ENABLE_GPU)
+
+namespace {
+
+const int32 kCommandBufferSize = 1024 * 1024;
+// TODO(kbr): make the transfer buffer size configurable via context
+// creation attributes.
+const int32 kTransferBufferSize = 1024 * 1024;
+
+// Singleton used to initialize and terminate the gles2 library.
+class GLES2Initializer {
+ public:
+ GLES2Initializer() {
+ gles2::Initialize();
+ }
+
+ ~GLES2Initializer() {
+ gles2::Terminate();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(GLES2Initializer);
+};
+
+static base::LazyInstance<GLES2Initializer> g_gles2_initializer(
+ base::LINKER_INITIALIZED);
+
+} // namespace anonymous
+
+// Manages a GL context.
+class Context : public base::SupportsWeakPtr<Context> {
+ public:
+ Context(GpuChannelHost* channel, Context* parent);
+ ~Context();
+
+ // Initialize a GGL context that can be used in association with a a GPU
+ // channel acquired from a RenderWidget or RenderView.
+ bool Initialize(bool onscreen,
+ int render_view_id,
+ const gfx::Size& size,
+ const char* allowed_extensions,
+ const int32* attrib_list);
+
+#if defined(OS_MACOSX)
+ // Asynchronously resizes an onscreen frame buffer.
+ void ResizeOnscreen(const gfx::Size& size);
+#endif
+
+ // Asynchronously resizes an offscreen frame buffer.
+ void ResizeOffscreen(const gfx::Size& size);
+
+ // Provides a callback that will be invoked when SwapBuffers has completed
+ // service side.
+ void SetSwapBuffersCallback(Callback0::Type* callback) {
+ swap_buffers_callback_.reset(callback);
+ }
+
+ void SetContextLostCallback(Callback0::Type* callback) {
+ context_lost_callback_.reset(callback);
+ }
+
+ // For an offscreen frame buffer context, return the frame buffer ID with
+ // respect to the parent.
+ uint32 parent_texture_id() const {
+ return parent_texture_id_;
+ }
+
+ uint32 CreateParentTexture(const gfx::Size& size) const;
+ void DeleteParentTexture(uint32 texture) const;
+
+ // Destroy all resources associated with the GGL context.
+ void Destroy();
+
+ // Make a GGL context current for the calling thread.
+ static bool MakeCurrent(Context* context);
+
+ // Display all content rendered since last call to SwapBuffers.
+ // TODO(apatrick): support rendering to browser window. This function is
+ // not useful at this point.
+ bool SwapBuffers();
+
+ // Create a hardware accelerated video decoder associated with this context.
+ media::VideoDecodeEngine* CreateVideoDecodeEngine();
+
+ // Create a hardware video decode context associated with this context.
+ media::VideoDecodeContext* CreateVideoDecodeContext(MessageLoop* message_loop,
+ bool hardware_decoder);
+
+ // Get the current error code. Clears context's error code afterwards.
+ Error GetError();
+
+ // Replace the current error code with this.
+ void SetError(Error error);
+
+ bool IsCommandBufferContextLost();
+
+ // TODO(gman): Remove this.
+ void DisableShaderTranslation();
+
+ gpu::gles2::GLES2Implementation* gles2_implementation() const {
+ return gles2_implementation_;
+ }
+
+ CommandBufferProxy* command_buffer() const {
+ return command_buffer_;
+ }
+
+ private:
+ void OnSwapBuffers();
+ void OnContextLost();
+
+ scoped_refptr<GpuChannelHost> channel_;
+ base::WeakPtr<Context> parent_;
+ scoped_ptr<Callback0::Type> swap_buffers_callback_;
+ scoped_ptr<Callback0::Type> context_lost_callback_;
+ uint32 parent_texture_id_;
+ CommandBufferProxy* command_buffer_;
+ gpu::gles2::GLES2CmdHelper* gles2_helper_;
+ int32 transfer_buffer_id_;
+ gpu::gles2::GLES2Implementation* gles2_implementation_;
+ gfx::Size size_;
+ Error last_error_;
+
+ DISALLOW_COPY_AND_ASSIGN(Context);
+};
+
+Context::Context(GpuChannelHost* channel, Context* parent)
+ : channel_(channel),
+ parent_(parent ? parent->AsWeakPtr() : base::WeakPtr<Context>()),
+ parent_texture_id_(0),
+ command_buffer_(NULL),
+ gles2_helper_(NULL),
+ transfer_buffer_id_(0),
+ gles2_implementation_(NULL),
+ last_error_(SUCCESS) {
+ DCHECK(channel);
+}
+
+Context::~Context() {
+ Destroy();
+}
+
+bool Context::Initialize(bool onscreen,
+ int render_view_id,
+ const gfx::Size& size,
+ const char* allowed_extensions,
+ const int32* attrib_list) {
+ DCHECK(size.width() >= 0 && size.height() >= 0);
+
+ if (channel_->state() != GpuChannelHost::kConnected)
+ return false;
+
+ // Ensure the gles2 library is initialized first in a thread safe way.
+ g_gles2_initializer.Get();
+
+ // Allocate a frame buffer ID with respect to the parent.
+ if (parent_.get()) {
+ // Flush any remaining commands in the parent context to make sure the
+ // texture id accounting stays consistent.
+ int32 token = parent_->gles2_helper_->InsertToken();
+ parent_->gles2_helper_->WaitForToken(token);
+ parent_texture_id_ = parent_->gles2_implementation_->MakeTextureId();
+ }
+
+ std::vector<int32> attribs;
+ while (attrib_list) {
+ int32 attrib = *attrib_list++;
+ switch (attrib) {
+ // Known attributes
+ case ggl::GGL_ALPHA_SIZE:
+ case ggl::GGL_BLUE_SIZE:
+ case ggl::GGL_GREEN_SIZE:
+ case ggl::GGL_RED_SIZE:
+ case ggl::GGL_DEPTH_SIZE:
+ case ggl::GGL_STENCIL_SIZE:
+ case ggl::GGL_SAMPLES:
+ case ggl::GGL_SAMPLE_BUFFERS:
+ attribs.push_back(attrib);
+ attribs.push_back(*attrib_list++);
+ break;
+ case ggl::GGL_NONE:
+ attribs.push_back(attrib);
+ attrib_list = NULL;
+ break;
+ default:
+ SetError(ggl::BAD_ATTRIBUTE);
+ attribs.push_back(ggl::GGL_NONE);
+ attrib_list = NULL;
+ break;
+ }
+ }
+
+ // Create a proxy to a command buffer in the GPU process.
+ if (onscreen) {
+ command_buffer_ = channel_->CreateViewCommandBuffer(
+ render_view_id,
+ allowed_extensions,
+ attribs);
+ } else {
+ CommandBufferProxy* parent_command_buffer =
+ parent_.get() ? parent_->command_buffer_ : NULL;
+ command_buffer_ = channel_->CreateOffscreenCommandBuffer(
+ parent_command_buffer,
+ size,
+ allowed_extensions,
+ attribs,
+ parent_texture_id_);
+ }
+ if (!command_buffer_) {
+ Destroy();
+ return false;
+ }
+
+ // Initiaize the command buffer.
+ if (!command_buffer_->Initialize(kCommandBufferSize)) {
+ Destroy();
+ return false;
+ }
+
+ command_buffer_->SetSwapBuffersCallback(
+ NewCallback(this, &Context::OnSwapBuffers));
+
+ command_buffer_->SetChannelErrorCallback(
+ NewCallback(this, &Context::OnContextLost));
+
+ // Create the GLES2 helper, which writes the command buffer protocol.
+ gles2_helper_ = new gpu::gles2::GLES2CmdHelper(command_buffer_);
+ if (!gles2_helper_->Initialize(kCommandBufferSize)) {
+ Destroy();
+ return false;
+ }
+
+ // Create a transfer buffer used to copy resources between the renderer
+ // process and the GPU process.
+ transfer_buffer_id_ =
+ command_buffer_->CreateTransferBuffer(kTransferBufferSize);
+ if (transfer_buffer_id_ < 0) {
+ Destroy();
+ return false;
+ }
+
+ // Map the buffer into the renderer process's address space.
+ gpu::Buffer transfer_buffer =
+ command_buffer_->GetTransferBuffer(transfer_buffer_id_);
+ if (!transfer_buffer.ptr) {
+ Destroy();
+ return false;
+ }
+
+ // Create the object exposing the OpenGL API.
+ gles2_implementation_ = new gpu::gles2::GLES2Implementation(
+ gles2_helper_,
+ transfer_buffer.size,
+ transfer_buffer.ptr,
+ transfer_buffer_id_,
+ false);
+
+ size_ = size;
+
+ return true;
+}
+
+#if defined(OS_MACOSX)
+void Context::ResizeOnscreen(const gfx::Size& size) {
+ DCHECK(size.width() > 0 && size.height() > 0);
+ size_ = size;
+ command_buffer_->SetWindowSize(size);
+}
+#endif
+
+void Context::ResizeOffscreen(const gfx::Size& size) {
+ DCHECK(size.width() > 0 && size.height() > 0);
+ if (size_ != size) {
+ command_buffer_->ResizeOffscreenFrameBuffer(size);
+ size_ = size;
+ }
+}
+
+uint32 Context::CreateParentTexture(const gfx::Size& size) const {
+ // Allocate a texture ID with respect to the parent.
+ if (parent_.get()) {
+ if (!MakeCurrent(parent_.get()))
+ return 0;
+ uint32 texture_id = parent_->gles2_implementation_->MakeTextureId();
+ parent_->gles2_implementation_->BindTexture(GL_TEXTURE_2D, texture_id);
+ parent_->gles2_implementation_->TexParameteri(
+ GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ parent_->gles2_implementation_->TexParameteri(
+ GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ parent_->gles2_implementation_->TexParameteri(
+ GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ parent_->gles2_implementation_->TexParameteri(
+ GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ parent_->gles2_implementation_->TexImage2D(GL_TEXTURE_2D,
+ 0, // mip level
+ GL_RGBA,
+ size.width(),
+ size.height(),
+ 0, // border
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ NULL);
+ // Make sure that the parent texture's storage is allocated before we let
+ // the caller attempt to use it.
+ int32 token = parent_->gles2_helper_->InsertToken();
+ parent_->gles2_helper_->WaitForToken(token);
+ return texture_id;
+ }
+ return 0;
+}
+
+void Context::DeleteParentTexture(uint32 texture) const {
+ if (parent_.get()) {
+ if (!MakeCurrent(parent_.get()))
+ return;
+ parent_->gles2_implementation_->DeleteTextures(1, &texture);
+ }
+}
+
+void Context::Destroy() {
+ if (parent_.get() && parent_texture_id_ != 0)
+ parent_->gles2_implementation_->FreeTextureId(parent_texture_id_);
+
+ delete gles2_implementation_;
+ gles2_implementation_ = NULL;
+
+ if (command_buffer_ && transfer_buffer_id_ != 0) {
+ command_buffer_->DestroyTransferBuffer(transfer_buffer_id_);
+ transfer_buffer_id_ = 0;
+ }
+
+ delete gles2_helper_;
+ gles2_helper_ = NULL;
+
+ if (channel_ && command_buffer_) {
+ channel_->DestroyCommandBuffer(command_buffer_);
+ command_buffer_ = NULL;
+ }
+
+ channel_ = NULL;
+}
+
+bool Context::MakeCurrent(Context* context) {
+ if (context) {
+ gles2::SetGLContext(context->gles2_implementation_);
+
+ // Don't request latest error status from service. Just use the locally
+ // cached information from the last flush.
+ // TODO(apatrick): I'm not sure if this should actually change the
+ // current context if it fails. For now it gets changed even if it fails
+ // because making GL calls with a NULL context crashes.
+ if (context->command_buffer_->GetLastState().error != gpu::error::kNoError)
+ return false;
+ } else {
+ gles2::SetGLContext(NULL);
+ }
+
+ return true;
+}
+
+bool Context::SwapBuffers() {
+ GPU_TRACE_EVENT0("gpu", "Context::SwapBuffers");
+ // Don't request latest error status from service. Just use the locally cached
+ // information from the last flush.
+ if (command_buffer_->GetLastState().error != gpu::error::kNoError)
+ return false;
+
+ gles2_implementation_->SwapBuffers();
+ return true;
+}
+
+media::VideoDecodeEngine* Context::CreateVideoDecodeEngine() {
+ return channel_->gpu_video_service_host()->CreateVideoDecoder(
+ command_buffer_->route_id());
+}
+
+media::VideoDecodeContext* Context::CreateVideoDecodeContext(
+ MessageLoop* message_loop, bool hardware_decoder) {
+ return new Gles2VideoDecodeContext(message_loop, hardware_decoder, this);
+}
+
+Error Context::GetError() {
+ gpu::CommandBuffer::State state = command_buffer_->GetState();
+ if (state.error == gpu::error::kNoError) {
+ Error old_error = last_error_;
+ last_error_ = SUCCESS;
+ return old_error;
+ } else {
+ // All command buffer errors are unrecoverable. The error is treated as a
+ // lost context: destroy the context and create another one.
+ return CONTEXT_LOST;
+ }
+}
+
+void Context::SetError(Error error) {
+ last_error_ = error;
+}
+
+bool Context::IsCommandBufferContextLost() {
+ gpu::CommandBuffer::State state = command_buffer_->GetLastState();
+ return state.error == gpu::error::kLostContext;
+}
+
+// TODO(gman): Remove This
+void Context::DisableShaderTranslation() {
+ gles2_implementation_->CommandBufferEnableCHROMIUM(
+ PEPPER3D_SKIP_GLSL_TRANSLATION);
+}
+
+void Context::OnSwapBuffers() {
+ if (swap_buffers_callback_.get())
+ swap_buffers_callback_->Run();
+}
+
+void Context::OnContextLost() {
+ if (context_lost_callback_.get())
+ context_lost_callback_->Run();
+}
+
+#endif // ENABLE_GPU
+
+Context* CreateViewContext(GpuChannelHost* channel,
+ int render_view_id,
+ const char* allowed_extensions,
+ const int32* attrib_list) {
+#if defined(ENABLE_GPU)
+ scoped_ptr<Context> context(new Context(channel, NULL));
+ if (!context->Initialize(
+ true, render_view_id, gfx::Size(), allowed_extensions, attrib_list))
+ return NULL;
+
+ return context.release();
+#else
+ return NULL;
+#endif
+}
+
+#if defined(OS_MACOSX)
+void ResizeOnscreenContext(Context* context, const gfx::Size& size) {
+#if defined(ENABLE_GPU)
+ context->ResizeOnscreen(size);
+#endif
+}
+#endif
+
+Context* CreateOffscreenContext(GpuChannelHost* channel,
+ Context* parent,
+ const gfx::Size& size,
+ const char* allowed_extensions,
+ const int32* attrib_list) {
+#if defined(ENABLE_GPU)
+ scoped_ptr<Context> context(new Context(channel, parent));
+ if (!context->Initialize(false, 0, size, allowed_extensions, attrib_list))
+ return NULL;
+
+ return context.release();
+#else
+ return NULL;
+#endif
+}
+
+void ResizeOffscreenContext(Context* context, const gfx::Size& size) {
+#if defined(ENABLE_GPU)
+ context->ResizeOffscreen(size);
+#endif
+}
+
+uint32 GetParentTextureId(Context* context) {
+#if defined(ENABLE_GPU)
+ return context->parent_texture_id();
+#else
+ return 0;
+#endif
+}
+
+uint32 CreateParentTexture(Context* context, const gfx::Size& size) {
+#if defined(ENABLE_GPU)
+ return context->CreateParentTexture(size);
+#else
+ return 0;
+#endif
+}
+
+void DeleteParentTexture(Context* context, uint32 texture) {
+#if defined(ENABLE_GPU)
+ context->DeleteParentTexture(texture);
+#endif
+}
+
+void SetSwapBuffersCallback(Context* context,
+ Callback0::Type* callback) {
+#if defined(ENABLE_GPU)
+ context->SetSwapBuffersCallback(callback);
+#endif
+}
+
+void SetContextLostCallback(Context* context,
+ Callback0::Type* callback) {
+#if defined(ENABLE_GPU)
+ context->SetContextLostCallback(callback);
+#endif
+}
+
+bool MakeCurrent(Context* context) {
+#if defined(ENABLE_GPU)
+ return Context::MakeCurrent(context);
+#else
+ return false;
+#endif
+}
+
+bool SwapBuffers(Context* context) {
+#if defined(ENABLE_GPU)
+ if (!context)
+ return false;
+
+ return context->SwapBuffers();
+#else
+ return false;
+#endif
+}
+
+bool DestroyContext(Context* context) {
+#if defined(ENABLE_GPU)
+ if (!context)
+ return false;
+
+ delete context;
+ return true;
+#else
+ return false;
+#endif
+}
+
+media::VideoDecodeEngine* CreateVideoDecodeEngine(Context* context) {
+ return context->CreateVideoDecodeEngine();
+}
+
+media::VideoDecodeContext* CreateVideoDecodeContext(
+ Context* context, MessageLoop* message_loop, bool hardware_decoder) {
+ return context->CreateVideoDecodeContext(message_loop, hardware_decoder);
+}
+
+Error GetError(Context* context) {
+#if defined(ENABLE_GPU)
+ return context->GetError();
+#else
+ return NOT_INITIALIZED;
+#endif
+}
+
+bool IsCommandBufferContextLost(Context* context) {
+ return context->IsCommandBufferContextLost();
+}
+
+// TODO(gman): Remove This
+void DisableShaderTranslation(Context* context) {
+#if defined(ENABLE_GPU)
+ if (context) {
+ context->DisableShaderTranslation();
+ }
+#endif
+}
+
+gpu::gles2::GLES2Implementation* GetImplementation(Context* context) {
+ if (!context)
+ return NULL;
+
+ return context->gles2_implementation();
+}
+
+CommandBufferProxy* GetCommandBufferProxy(Context* context) {
+ DCHECK(context);
+ return context->command_buffer();
+}
+
+} // namespace ggl
diff --git a/content/renderer/ggl.h b/content/renderer/ggl.h
new file mode 100644
index 0000000..fb96d2e
--- /dev/null
+++ b/content/renderer/ggl.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This API is consistent with other OpenGL setup APIs like window's WGL
+// and pepper's PGL. This API is used to manage OpenGL contexts in the Chrome
+// renderer process in a way that is consistent with other platforms. It is
+// a C style API to ease porting of existing OpenGL software to Chrome.
+
+#ifndef CONTENT_RENDERER_GGL_H_
+#define CONTENT_RENDERER_GGL_H_
+#pragma once
+
+#include "base/callback.h"
+#include "ui/gfx/native_widget_types.h"
+#include "ui/gfx/size.h"
+
+class GpuChannelHost;
+class MessageLoop;
+class CommandBufferProxy;
+
+namespace gpu {
+namespace gles2 {
+class GLES2Implementation;
+}
+}
+
+namespace media {
+class VideoDecodeContext;
+class VideoDecodeEngine;
+}
+
+namespace ggl {
+
+class Context;
+
+// These are the same error codes as used by EGL.
+enum Error {
+ SUCCESS = 0x3000,
+ NOT_INITIALIZED = 0x3001,
+ BAD_ATTRIBUTE = 0x3004,
+ BAD_CONTEXT = 0x3006,
+ CONTEXT_LOST = 0x300E
+};
+
+// Context configuration attributes. These are the same as used by EGL.
+// Attributes are matched using a closest fit algorithm.
+const int32 GGL_ALPHA_SIZE = 0x3021;
+const int32 GGL_BLUE_SIZE = 0x3022;
+const int32 GGL_GREEN_SIZE = 0x3023;
+const int32 GGL_RED_SIZE = 0x3024;
+const int32 GGL_DEPTH_SIZE = 0x3025;
+const int32 GGL_STENCIL_SIZE = 0x3026;
+const int32 GGL_SAMPLES = 0x3031;
+const int32 GGL_SAMPLE_BUFFERS = 0x3032;
+const int32 GGL_NONE = 0x3038; // Attrib list = terminator
+
+// Initialize the GGL library. This must have completed before any other GGL
+// functions are invoked.
+bool Initialize();
+
+// Terminate the GGL library. This must be called after any other GGL functions
+// have completed.
+bool Terminate();
+
+// Create a GGL context that renders directly to a view. The view and the
+// associated window must not be destroyed until the returned context has been
+// destroyed, otherwise the GPU process might attempt to render to an invalid
+// window handle.
+//
+// NOTE: on Mac OS X, this entry point is only used to set up the
+// accelerated compositor's output. On this platform, we actually pass
+// a gfx::PluginWindowHandle in place of the gfx::NativeViewId,
+// because the facility to allocate a fake PluginWindowHandle is
+// already in place. We could add more entry points and messages to
+// allocate both fake PluginWindowHandles and NativeViewIds and map
+// from fake NativeViewIds to PluginWindowHandles, but this seems like
+// unnecessary complexity at the moment.
+//
+// The render_view_id is currently also only used on Mac OS X.
+// TODO(kbr): clean up the arguments to this function and make them
+// more cross-platform.
+Context* CreateViewContext(GpuChannelHost* channel,
+ int render_view_id,
+ const char* allowed_extensions,
+ const int32* attrib_list);
+
+#if defined(OS_MACOSX)
+// On Mac OS X only, view contexts actually behave like offscreen contexts, and
+// require an explicit resize operation which is slightly different from that
+// of offscreen contexts.
+void ResizeOnscreenContext(Context* context, const gfx::Size& size);
+#endif
+
+// Create a GGL context that renders to an offscreen frame buffer. If parent is
+// not NULL, that context can access a copy of the created
+// context's frame buffer that is updated every time SwapBuffers is called. It
+// is not as general as shared contexts in other implementations of OpenGL. If
+// parent is not NULL, it must be used on the same thread as the parent. A child
+// context may not outlive its parent. attrib_list must be NULL or a
+// GGL_NONE-terminated list of attribute/value pairs.
+Context* CreateOffscreenContext(GpuChannelHost* channel,
+ Context* parent,
+ const gfx::Size& size,
+ const char* allowed_extensions,
+ const int32* attrib_list);
+
+// Resize an offscreen frame buffer. The resize occurs on the next call to
+// SwapBuffers. This is to avoid waiting until all pending GL calls have been
+// executed by the GPU process. Everything rendered up to the call to
+// SwapBuffers will be lost. A lost context will be reported if the resize
+// fails.
+void ResizeOffscreenContext(Context* context, const gfx::Size& size);
+
+// For an offscreen frame buffer context, return the texture ID with
+// respect to the parent context. Returns zero if context does not have a
+// parent.
+uint32 GetParentTextureId(Context* context);
+
+// Create a new texture in the parent's context. Returns zero if context
+// does not have a parent.
+uint32 CreateParentTexture(Context* context, const gfx::Size& size);
+
+// Deletes a texture in the parent's context.
+void DeleteParentTexture(Context* context, uint32 texture);
+
+// Provides a callback that will be invoked when SwapBuffers has completed
+// service side.
+void SetSwapBuffersCallback(Context* context, Callback0::Type* callback);
+
+void SetContextLostCallback(Context* context, Callback0::Type* callback);
+
+// Set the current GGL context for the calling thread.
+bool MakeCurrent(Context* context);
+
+// For a view context, display everything that has been rendered since the
+// last call. For an offscreen context, resolve everything that has been
+// rendered since the last call to a copy that can be accessed by the parent
+// context.
+bool SwapBuffers(Context* context);
+
+// Destroy the given GGL context.
+bool DestroyContext(Context* context);
+
+// Create a hardware video decode engine corresponding to the context.
+media::VideoDecodeEngine* CreateVideoDecodeEngine(Context* context);
+
+// Create a hardware video decode context to pair with the hardware video
+// decode engine. It can also be used with a software decode engine.
+//
+// Set |hardware_decoder| to true if this context is for a hardware video
+// engine. |message_loop| is where the decode context should run on.
+media::VideoDecodeContext* CreateVideoDecodeContext(Context* context,
+ MessageLoop* message_loop,
+ bool hardware_decoder);
+
+// TODO(gman): Remove this
+void DisableShaderTranslation(Context* context);
+
+// Allows direct access to the GLES2 implementation so a context
+// can be used without making it current.
+gpu::gles2::GLES2Implementation* GetImplementation(Context* context);
+
+// Return the current GGL error.
+Error GetError(Context* context);
+
+// Return true if GPU process reported context lost or there was a problem
+// communicating with the GPU process.
+bool IsCommandBufferContextLost(Context* context);
+
+CommandBufferProxy* GetCommandBufferProxy(Context* context);
+
+} // namespace ggl
+
+#endif // CONTENT_RENDERER_GGL_H_
diff --git a/content/renderer/media/audio_renderer_impl.cc b/content/renderer/media/audio_renderer_impl.cc
new file mode 100644
index 0000000..86338e9
--- /dev/null
+++ b/content/renderer/media/audio_renderer_impl.cc
@@ -0,0 +1,362 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/audio_renderer_impl.h"
+
+#include <math.h>
+
+#include "chrome/common/render_messages.h"
+#include "chrome/common/render_messages_params.h"
+#include "chrome/renderer/audio_message_filter.h"
+#include "chrome/renderer/render_view.h"
+#include "chrome/renderer/render_thread.h"
+#include "media/base/filter_host.h"
+
+namespace {
+
+// We will try to fill 200 ms worth of audio samples in each packet. A round
+// trip latency for IPC messages are typically 10 ms, this should give us
+// plenty of time to avoid clicks.
+const int kMillisecondsPerPacket = 200;
+
+// We have at most 3 packets in browser, i.e. 600 ms. This is a reasonable
+// amount to avoid clicks.
+const int kPacketsInBuffer = 3;
+
+} // namespace
+
+AudioRendererImpl::AudioRendererImpl(AudioMessageFilter* filter)
+ : AudioRendererBase(),
+ bytes_per_second_(0),
+ filter_(filter),
+ stream_id_(0),
+ shared_memory_(NULL),
+ shared_memory_size_(0),
+ io_loop_(filter->message_loop()),
+ stopped_(false),
+ pending_request_(false),
+ prerolling_(false),
+ preroll_bytes_(0) {
+ DCHECK(io_loop_);
+}
+
+AudioRendererImpl::~AudioRendererImpl() {
+}
+
+base::TimeDelta AudioRendererImpl::ConvertToDuration(int bytes) {
+ if (bytes_per_second_) {
+ return base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond * bytes / bytes_per_second_);
+ }
+ return base::TimeDelta();
+}
+
+bool AudioRendererImpl::OnInitialize(const media::MediaFormat& media_format) {
+ // Parse integer values in MediaFormat.
+ if (!ParseMediaFormat(media_format,
+ &params_.channels,
+ &params_.sample_rate,
+ &params_.bits_per_sample)) {
+ return false;
+ }
+ params_.format = AudioParameters::AUDIO_PCM_LINEAR;
+
+ // Calculate the number of bytes per second using information of the stream.
+ bytes_per_second_ = params_.sample_rate * params_.channels *
+ params_.bits_per_sample / 8;
+
+ io_loop_->PostTask(FROM_HERE,
+ NewRunnableMethod(this, &AudioRendererImpl::CreateStreamTask, params_));
+ return true;
+}
+
+void AudioRendererImpl::OnStop() {
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+ stopped_ = true;
+
+ // We should never touch |io_loop_| after being stopped, so post our final
+ // task to clean up.
+ io_loop_->PostTask(FROM_HERE,
+ NewRunnableMethod(this, &AudioRendererImpl::DestroyTask));
+}
+
+void AudioRendererImpl::ConsumeAudioSamples(
+ scoped_refptr<media::Buffer> buffer_in) {
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+
+ // TODO(hclam): handle end of stream here.
+
+ // Use the base class to queue the buffer.
+ AudioRendererBase::ConsumeAudioSamples(buffer_in);
+
+ // Post a task to render thread to notify a packet reception.
+ io_loop_->PostTask(FROM_HERE,
+ NewRunnableMethod(this, &AudioRendererImpl::NotifyPacketReadyTask));
+}
+
+void AudioRendererImpl::SetPlaybackRate(float rate) {
+ DCHECK(rate >= 0.0f);
+
+ base::AutoLock auto_lock(lock_);
+ // Handle the case where we stopped due to |io_loop_| dying.
+ if (stopped_) {
+ AudioRendererBase::SetPlaybackRate(rate);
+ return;
+ }
+
+ // We have two cases here:
+ // Play: GetPlaybackRate() == 0.0 && rate != 0.0
+ // Pause: GetPlaybackRate() != 0.0 && rate == 0.0
+ if (GetPlaybackRate() == 0.0f && rate != 0.0f) {
+ io_loop_->PostTask(FROM_HERE,
+ NewRunnableMethod(this, &AudioRendererImpl::PlayTask));
+ } else if (GetPlaybackRate() != 0.0f && rate == 0.0f) {
+ // Pause is easy, we can always pause.
+ io_loop_->PostTask(FROM_HERE,
+ NewRunnableMethod(this, &AudioRendererImpl::PauseTask));
+ }
+ AudioRendererBase::SetPlaybackRate(rate);
+
+ // If we are playing, give a kick to try fulfilling the packet request as
+ // the previous packet request may be stalled by a pause.
+ if (rate > 0.0f) {
+ io_loop_->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(this, &AudioRendererImpl::NotifyPacketReadyTask));
+ }
+}
+
+void AudioRendererImpl::Pause(media::FilterCallback* callback) {
+ AudioRendererBase::Pause(callback);
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+
+ io_loop_->PostTask(FROM_HERE,
+ NewRunnableMethod(this, &AudioRendererImpl::PauseTask));
+}
+
+void AudioRendererImpl::Seek(base::TimeDelta time,
+ media::FilterCallback* callback) {
+ AudioRendererBase::Seek(time, callback);
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+
+ io_loop_->PostTask(FROM_HERE,
+ NewRunnableMethod(this, &AudioRendererImpl::SeekTask));
+}
+
+
+void AudioRendererImpl::Play(media::FilterCallback* callback) {
+ AudioRendererBase::Play(callback);
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+
+ if (GetPlaybackRate() != 0.0f) {
+ io_loop_->PostTask(FROM_HERE,
+ NewRunnableMethod(this, &AudioRendererImpl::PlayTask));
+ } else {
+ io_loop_->PostTask(FROM_HERE,
+ NewRunnableMethod(this, &AudioRendererImpl::PauseTask));
+ }
+}
+
+void AudioRendererImpl::SetVolume(float volume) {
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+ io_loop_->PostTask(FROM_HERE,
+ NewRunnableMethod(
+ this, &AudioRendererImpl::SetVolumeTask, volume));
+}
+
+void AudioRendererImpl::OnCreated(base::SharedMemoryHandle handle,
+ uint32 length) {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+
+ shared_memory_.reset(new base::SharedMemory(handle, false));
+ shared_memory_->Map(length);
+ shared_memory_size_ = length;
+}
+
+void AudioRendererImpl::OnLowLatencyCreated(base::SharedMemoryHandle,
+ base::SyncSocket::Handle, uint32) {
+ // AudioRenderer should not have a low-latency audio channel.
+ NOTREACHED();
+}
+
+void AudioRendererImpl::OnRequestPacket(AudioBuffersState buffers_state) {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(!pending_request_);
+ pending_request_ = true;
+ request_buffers_state_ = buffers_state;
+ }
+
+ // Try to fill in the fulfill the packet request.
+ NotifyPacketReadyTask();
+}
+
+void AudioRendererImpl::OnStateChanged(
+ const ViewMsg_AudioStreamState_Params& state) {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+
+ switch (state.state) {
+ case ViewMsg_AudioStreamState_Params::kError:
+ // We receive this error if we counter an hardware error on the browser
+ // side. We can proceed with ignoring the audio stream.
+ // TODO(hclam): We need more handling of these kind of error. For example
+ // re-try creating the audio output stream on the browser side or fail
+ // nicely and report to demuxer that the whole audio stream is discarded.
+ host()->DisableAudioRenderer();
+ break;
+ // TODO(hclam): handle these events.
+ case ViewMsg_AudioStreamState_Params::kPlaying:
+ case ViewMsg_AudioStreamState_Params::kPaused:
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+void AudioRendererImpl::OnVolume(double volume) {
+ // TODO(hclam): decide whether we need to report the current volume to
+ // pipeline.
+}
+
+void AudioRendererImpl::CreateStreamTask(const AudioParameters& audio_params) {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+
+ // Make sure we don't call create more than once.
+ DCHECK_EQ(0, stream_id_);
+ stream_id_ = filter_->AddDelegate(this);
+ io_loop_->AddDestructionObserver(this);
+
+ ViewHostMsg_Audio_CreateStream_Params params;
+ params.params = audio_params;
+
+ // Let the browser choose packet size.
+ params.params.samples_per_packet = 0;
+
+ filter_->Send(new ViewHostMsg_CreateAudioStream(0, stream_id_, params,
+ false));
+}
+
+void AudioRendererImpl::PlayTask() {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ filter_->Send(new ViewHostMsg_PlayAudioStream(0, stream_id_));
+}
+
+void AudioRendererImpl::PauseTask() {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ filter_->Send(new ViewHostMsg_PauseAudioStream(0, stream_id_));
+}
+
+void AudioRendererImpl::SeekTask() {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ // We have to pause the audio stream before we can flush.
+ filter_->Send(new ViewHostMsg_PauseAudioStream(0, stream_id_));
+ filter_->Send(new ViewHostMsg_FlushAudioStream(0, stream_id_));
+}
+
+void AudioRendererImpl::DestroyTask() {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ // Make sure we don't call destroy more than once.
+ DCHECK_NE(0, stream_id_);
+ filter_->RemoveDelegate(stream_id_);
+ filter_->Send(new ViewHostMsg_CloseAudioStream(0, stream_id_));
+ io_loop_->RemoveDestructionObserver(this);
+ stream_id_ = 0;
+}
+
+void AudioRendererImpl::SetVolumeTask(double volume) {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+ filter_->Send(new ViewHostMsg_SetAudioVolume(0, stream_id_, volume));
+}
+
+void AudioRendererImpl::NotifyPacketReadyTask() {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+ if (pending_request_ && GetPlaybackRate() > 0.0f) {
+ DCHECK(shared_memory_.get());
+
+ // Adjust the playback delay.
+ base::Time current_time = base::Time::Now();
+
+ base::TimeDelta request_delay =
+ ConvertToDuration(request_buffers_state_.total_bytes());
+
+ // Add message delivery delay.
+ if (current_time > request_buffers_state_.timestamp) {
+ base::TimeDelta receive_latency =
+ current_time - request_buffers_state_.timestamp;
+
+ // If the receive latency is too much it may offset all the delay.
+ if (receive_latency >= request_delay) {
+ request_delay = base::TimeDelta();
+ } else {
+ request_delay -= receive_latency;
+ }
+ }
+
+ // Finally we need to adjust the delay according to playback rate.
+ if (GetPlaybackRate() != 1.0f) {
+ request_delay = base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(ceil(request_delay.InMicroseconds() *
+ GetPlaybackRate())));
+ }
+
+ uint32 filled = FillBuffer(static_cast<uint8*>(shared_memory_->memory()),
+ shared_memory_size_, request_delay,
+ request_buffers_state_.pending_bytes == 0);
+ pending_request_ = false;
+ // Then tell browser process we are done filling into the buffer.
+ filter_->Send(
+ new ViewHostMsg_NotifyAudioPacketReady(0, stream_id_, filled));
+ }
+}
+
+void AudioRendererImpl::WillDestroyCurrentMessageLoop() {
+ DCHECK(MessageLoop::current() == io_loop_);
+
+ // We treat the IO loop going away the same as stopping.
+ base::AutoLock auto_lock(lock_);
+ if (stopped_)
+ return;
+
+ stopped_ = true;
+ DestroyTask();
+}
diff --git a/content/renderer/media/audio_renderer_impl.h b/content/renderer/media/audio_renderer_impl.h
new file mode 100644
index 0000000..c9f11a3
--- /dev/null
+++ b/content/renderer/media/audio_renderer_impl.h
@@ -0,0 +1,157 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Audio rendering unit utilizing audio output stream provided by browser
+// process through IPC.
+//
+// Relationship of classes.
+//
+// AudioRendererHost AudioRendererImpl
+// ^ ^
+// | |
+// v IPC v
+// RenderMessageFilter <---------> AudioMessageFilter
+//
+// Implementation of interface with audio device is in AudioRendererHost and
+// it provides services and entry points in RenderMessageFilter, allowing
+// usage of IPC calls to interact with audio device. AudioMessageFilter acts
+// as a portal for IPC calls and does no more than delegation.
+//
+// Transportation of audio buffer is done by using shared memory, after
+// OnCreateStream is executed, OnCreated would be called along with a
+// SharedMemoryHandle upon successful creation of audio output stream in the
+// browser process. The same piece of shared memory would be used during the
+// lifetime of this unit.
+//
+// This class lives inside three threads during its lifetime, namely:
+// 1. IO thread.
+// The thread within which this class receives all the IPC messages and
+// IPC communications can only happen in this thread.
+// 2. Pipeline thread
+// Initialization of filter and proper stopping of filters happens here.
+//    Properties of this filter are also set in this thread.
+// 3. Audio decoder thread (If there's one.)
+// Responsible for decoding audio data and gives raw PCM data to this object.
+
+#ifndef CONTENT_RENDERER_MEDIA_AUDIO_RENDERER_IMPL_H_
+#define CONTENT_RENDERER_MEDIA_AUDIO_RENDERER_IMPL_H_
+#pragma once
+
+#include "base/gtest_prod_util.h"
+#include "base/message_loop.h"
+#include "base/scoped_ptr.h"
+#include "base/shared_memory.h"
+#include "base/synchronization/lock.h"
+#include "chrome/renderer/audio_message_filter.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/base/filters.h"
+#include "media/filters/audio_renderer_base.h"
+
+class AudioMessageFilter;
+
+class AudioRendererImpl : public media::AudioRendererBase,
+ public AudioMessageFilter::Delegate,
+ public MessageLoop::DestructionObserver {
+ public:
+ // Methods called on Render thread ------------------------------------------
+ explicit AudioRendererImpl(AudioMessageFilter* filter);
+ virtual ~AudioRendererImpl();
+
+ // Methods called on IO thread ----------------------------------------------
+ // AudioMessageFilter::Delegate methods, called by AudioMessageFilter.
+ virtual void OnRequestPacket(AudioBuffersState buffers_state);
+ virtual void OnStateChanged(const ViewMsg_AudioStreamState_Params& state);
+ virtual void OnCreated(base::SharedMemoryHandle handle, uint32 length);
+ virtual void OnLowLatencyCreated(base::SharedMemoryHandle handle,
+ base::SyncSocket::Handle socket_handle,
+ uint32 length);
+ virtual void OnVolume(double volume);
+
+ // Methods called on pipeline thread ----------------------------------------
+ // media::Filter implementation.
+ virtual void SetPlaybackRate(float rate);
+ virtual void Pause(media::FilterCallback* callback);
+ virtual void Seek(base::TimeDelta time, media::FilterCallback* callback);
+ virtual void Play(media::FilterCallback* callback);
+
+ // media::AudioRenderer implementation.
+ virtual void SetVolume(float volume);
+
+ protected:
+ // Methods called on audio renderer thread ----------------------------------
+ // These methods are called from AudioRendererBase.
+ virtual bool OnInitialize(const media::MediaFormat& media_format);
+ virtual void OnStop();
+
+ // Called when the decoder completes a Read().
+ virtual void ConsumeAudioSamples(scoped_refptr<media::Buffer> buffer_in);
+
+ private:
+ // For access to constructor and IO thread methods.
+ friend class AudioRendererImplTest;
+ FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest, Stop);
+ FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest,
+ DestroyedMessageLoop_ConsumeAudioSamples);
+ // Helper methods.
+ // Convert number of bytes to duration of time using information about the
+ // number of channels, sample rate and sample bits.
+ base::TimeDelta ConvertToDuration(int bytes);
+
+  // Methods called on IO thread ----------------------------------------------
+  // The following methods are tasks posted on the IO thread that need to
+  // be executed on that thread. They interact with AudioMessageFilter and
+  // send IPC messages on that thread.
+ void CreateStreamTask(const AudioParameters& params);
+ void PlayTask();
+ void PauseTask();
+ void SeekTask();
+ void SetVolumeTask(double volume);
+ void NotifyPacketReadyTask();
+ void DestroyTask();
+
+ // Called on IO thread when message loop is dying.
+ virtual void WillDestroyCurrentMessageLoop();
+
+ // Information about the audio stream.
+ AudioParameters params_;
+ uint32 bytes_per_second_;
+
+ scoped_refptr<AudioMessageFilter> filter_;
+
+ // ID of the stream created in the browser process.
+ int32 stream_id_;
+
+ // Memory shared by the browser process for audio buffer.
+ scoped_ptr<base::SharedMemory> shared_memory_;
+ uint32 shared_memory_size_;
+
+ // Message loop for the IO thread.
+ MessageLoop* io_loop_;
+
+ // Protects:
+ // - |stopped_|
+ // - |pending_request_|
+ // - |request_buffers_state_|
+ base::Lock lock_;
+
+ // A flag that indicates this filter is called to stop.
+ bool stopped_;
+
+ // A flag that indicates an outstanding packet request.
+ bool pending_request_;
+
+ // State of the audio buffers at time of the last request.
+ AudioBuffersState request_buffers_state_;
+
+ // State variables for prerolling.
+ bool prerolling_;
+
+ // Remaining bytes for prerolling to complete.
+ uint32 preroll_bytes_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioRendererImpl);
+};
+
+#endif // CONTENT_RENDERER_MEDIA_AUDIO_RENDERER_IMPL_H_
diff --git a/content/renderer/media/audio_renderer_impl_unittest.cc b/content/renderer/media/audio_renderer_impl_unittest.cc
new file mode 100644
index 0000000..43ff9e5
--- /dev/null
+++ b/content/renderer/media/audio_renderer_impl_unittest.cc
@@ -0,0 +1,146 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process_util.h"
+#include "chrome/common/render_messages.h"
+#include "chrome/common/render_messages_params.h"
+#include "content/renderer/media/audio_renderer_impl.h"
+#include "media/base/data_buffer.h"
+#include "media/base/media_format.h"
+#include "media/base/mock_callback.h"
+#include "media/base/mock_filter_host.h"
+#include "media/base/mock_filters.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::ReturnRef;
+
+class AudioRendererImplTest : public ::testing::Test {
+ public:
+ static const int kRouteId = 0;
+ static const int kSize = 1024;
+
+ AudioRendererImplTest() {
+ message_loop_.reset(new MessageLoop(MessageLoop::TYPE_IO));
+
+ // TODO(scherkus): use gmock with AudioMessageFilter to verify
+ // AudioRendererImpl calls or doesn't call Send().
+ filter_ = new AudioMessageFilter(kRouteId);
+ filter_->message_loop_ = message_loop_.get();
+
+ // Create temporary shared memory.
+ CHECK(shared_mem_.CreateAnonymous(kSize));
+
+ // Setup expectations for initialization.
+ decoder_ = new media::MockAudioDecoder();
+
+ // Associate media format with decoder
+ decoder_media_format_.SetAsInteger(media::MediaFormat::kChannels, 2);
+ decoder_media_format_.SetAsInteger(media::MediaFormat::kSampleRate, 48000);
+ decoder_media_format_.SetAsInteger(media::MediaFormat::kSampleBits, 16);
+ EXPECT_CALL(*decoder_, media_format())
+ .WillRepeatedly(ReturnRef(decoder_media_format_));
+
+ // Create and initialize audio renderer.
+ renderer_ = new AudioRendererImpl(filter_);
+ renderer_->set_host(&host_);
+ renderer_->Initialize(decoder_, media::NewExpectedCallback());
+
+ // Run pending tasks and simulate responding with a created audio stream.
+ message_loop_->RunAllPending();
+
+ // Duplicate the shared memory handle so both the test and the callee can
+ // close their copy.
+ base::SharedMemoryHandle duplicated_handle;
+ EXPECT_TRUE(shared_mem_.ShareToProcess(base::GetCurrentProcessHandle(),
+ &duplicated_handle));
+
+ renderer_->OnCreated(duplicated_handle, kSize);
+ }
+
+ virtual ~AudioRendererImplTest() {
+ }
+
+ protected:
+ // Fixtures.
+ scoped_ptr<MessageLoop> message_loop_;
+ scoped_refptr<AudioMessageFilter> filter_;
+ base::SharedMemory shared_mem_;
+ media::MockFilterHost host_;
+ scoped_refptr<media::MockAudioDecoder> decoder_;
+ scoped_refptr<AudioRendererImpl> renderer_;
+ media::MediaFormat decoder_media_format_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioRendererImplTest);
+};
+
+TEST_F(AudioRendererImplTest, SetPlaybackRate) {
+ // Execute SetPlaybackRate() codepath to create an IPC message.
+
+ // Toggle play/pause to generate some IPC messages.
+ renderer_->SetPlaybackRate(0.0f);
+ renderer_->SetPlaybackRate(1.0f);
+ renderer_->SetPlaybackRate(0.0f);
+
+ renderer_->Stop(media::NewExpectedCallback());
+ message_loop_->RunAllPending();
+}
+
+TEST_F(AudioRendererImplTest, SetVolume) {
+ // Execute SetVolume() codepath to create an IPC message.
+ renderer_->SetVolume(0.5f);
+ renderer_->Stop(media::NewExpectedCallback());
+ message_loop_->RunAllPending();
+}
+
+TEST_F(AudioRendererImplTest, Stop) {
+ // Declare some state messages.
+ const ViewMsg_AudioStreamState_Params kError(
+ ViewMsg_AudioStreamState_Params::kError);
+ const ViewMsg_AudioStreamState_Params kPlaying(
+ ViewMsg_AudioStreamState_Params::kPlaying);
+ const ViewMsg_AudioStreamState_Params kPaused(
+ ViewMsg_AudioStreamState_Params::kPaused);
+
+ // Execute Stop() codepath to create an IPC message.
+ renderer_->Stop(media::NewExpectedCallback());
+ message_loop_->RunAllPending();
+
+ // Run AudioMessageFilter::Delegate methods, which can be executed after being
+ // stopped. AudioRendererImpl shouldn't create any messages.
+ renderer_->OnRequestPacket(AudioBuffersState(kSize, 0));
+ renderer_->OnStateChanged(kError);
+ renderer_->OnStateChanged(kPlaying);
+ renderer_->OnStateChanged(kPaused);
+ renderer_->OnCreated(shared_mem_.handle(), kSize);
+ renderer_->OnVolume(0.5);
+
+ // It's possible that the upstream decoder replies right after being stopped.
+ scoped_refptr<media::Buffer> buffer(new media::DataBuffer(kSize));
+ renderer_->ConsumeAudioSamples(buffer);
+}
+
+TEST_F(AudioRendererImplTest, DestroyedMessageLoop_SetPlaybackRate) {
+ // Kill the message loop and verify SetPlaybackRate() still works.
+ message_loop_.reset();
+ renderer_->SetPlaybackRate(0.0f);
+ renderer_->SetPlaybackRate(1.0f);
+ renderer_->SetPlaybackRate(0.0f);
+ renderer_->Stop(media::NewExpectedCallback());
+}
+
+TEST_F(AudioRendererImplTest, DestroyedMessageLoop_SetVolume) {
+ // Kill the message loop and verify SetVolume() still works.
+ message_loop_.reset();
+ renderer_->SetVolume(0.5f);
+ renderer_->Stop(media::NewExpectedCallback());
+}
+
+TEST_F(AudioRendererImplTest, DestroyedMessageLoop_ConsumeAudioSamples) {
+ // Kill the message loop and verify OnReadComplete() still works.
+ message_loop_.reset();
+ scoped_refptr<media::Buffer> buffer(new media::DataBuffer(kSize));
+ renderer_->ConsumeAudioSamples(buffer);
+ renderer_->Stop(media::NewExpectedCallback());
+}
diff --git a/content/renderer/media/gles2_video_decode_context.cc b/content/renderer/media/gles2_video_decode_context.cc
new file mode 100644
index 0000000..8ca9b02
--- /dev/null
+++ b/content/renderer/media/gles2_video_decode_context.cc
@@ -0,0 +1,122 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <GLES2/gl2.h>
+
+#include "base/message_loop.h"
+#include "content/renderer/ggl.h"
+#include "content/renderer/media/gles2_video_decode_context.h"
+
+Gles2VideoDecodeContext::Gles2VideoDecodeContext(
+ MessageLoop* message_loop, bool memory_mapped, ggl::Context* context)
+ : message_loop_(message_loop),
+ memory_mapped_(memory_mapped),
+ context_(context) {
+}
+
+Gles2VideoDecodeContext::~Gles2VideoDecodeContext() {
+}
+
+void* Gles2VideoDecodeContext::GetDevice() {
+ // This decode context is used inside the renderer and so hardware decoder
+ // device handler should not be used.
+ return NULL;
+}
+
+void Gles2VideoDecodeContext::AllocateVideoFrames(
+ int num_frames, size_t width, size_t height,
+ media::VideoFrame::Format format,
+ std::vector<scoped_refptr<media::VideoFrame> >* frames_out, Task* task) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(this, &Gles2VideoDecodeContext::AllocateVideoFrames,
+ num_frames, width, height, format, frames_out,
+ task));
+ return;
+ }
+
+ // In this method we need to make the ggl context current and then generate
+ // textures for each video frame. We also need to allocate memory for each
+ // texture generated.
+ bool ret = ggl::MakeCurrent(context_);
+ CHECK(ret) << "Failed to switch context";
+
+ frames_.resize(num_frames);
+ for (int i = 0; i < num_frames; ++i) {
+ int planes = media::VideoFrame::GetNumberOfPlanes(format);
+ media::VideoFrame::GlTexture textures[media::VideoFrame::kMaxPlanes];
+
+ // Set the color format of the textures.
+ DCHECK(format == media::VideoFrame::RGBA ||
+ format == media::VideoFrame::YV12);
+ int gl_format = format == media::VideoFrame::RGBA ? GL_RGBA : GL_LUMINANCE;
+
+ glGenTextures(planes, textures);
+ for (int j = 0; j < planes; ++j) {
+ glBindTexture(GL_TEXTURE_2D, textures[j]);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexImage2D(GL_TEXTURE_2D, 0, gl_format, width, height, 0, gl_format,
+ GL_UNSIGNED_BYTE, NULL);
+ }
+ glFlush();
+
+ scoped_refptr<media::VideoFrame> frame;
+ media::VideoFrame::CreateFrameGlTexture(format, width, height, textures,
+ &frame);
+ frames_[i] = frame;
+ }
+ *frames_out = frames_;
+
+ task->Run();
+ delete task;
+}
+
+void Gles2VideoDecodeContext::ReleaseAllVideoFrames() {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(this,
+ &Gles2VideoDecodeContext::ReleaseAllVideoFrames));
+ return;
+ }
+
+ // Make the context current and then release the video frames.
+ bool ret = ggl::MakeCurrent(context_);
+ CHECK(ret) << "Failed to switch context";
+
+ for (size_t i = 0; i < frames_.size(); ++i) {
+ for (size_t j = 0; j < frames_[i]->planes(); ++j) {
+ media::VideoFrame::GlTexture texture = frames_[i]->gl_texture(j);
+ glDeleteTextures(1, &texture);
+ }
+ }
+ frames_.clear();
+}
+
+void Gles2VideoDecodeContext::ConvertToVideoFrame(
+ void* buffer, scoped_refptr<media::VideoFrame> frame, Task* task) {
+ DCHECK(memory_mapped_);
+ // TODO(hclam): Implement.
+}
+
+void Gles2VideoDecodeContext::Destroy(Task* task) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(this, &Gles2VideoDecodeContext::Destroy, task));
+ return;
+ }
+
+ ReleaseAllVideoFrames();
+ DCHECK_EQ(0u, frames_.size());
+
+ task->Run();
+ delete task;
+}
+
+DISABLE_RUNNABLE_METHOD_REFCOUNT(Gles2VideoDecodeContext);
diff --git a/content/renderer/media/gles2_video_decode_context.h b/content/renderer/media/gles2_video_decode_context.h
new file mode 100644
index 0000000..6ba7231
--- /dev/null
+++ b/content/renderer/media/gles2_video_decode_context.h
@@ -0,0 +1,113 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_GLES2_VIDEO_DECODE_CONTEXT_H_
+#define CONTENT_RENDERER_MEDIA_GLES2_VIDEO_DECODE_CONTEXT_H_
+
+#include <vector>
+
+#include "media/video/video_decode_context.h"
+
+class MessageLoop;
+
+namespace ggl {
+class Context;
+} // namespace ggl
+
+// FUNCTIONS
+//
+// This is a class that provides a video decode context using a ggl::Context
+// backend.
+//
+// It provides resources for a VideoDecodeEngine to store decoded video frames.
+//
+// This class is aware of the command buffer implementation of GLES2 inside the
+// Chrome renderer and keeps a reference of ggl::Context. It might use GLES2
+// commands specific to Chrome's renderer process to provide needed resources.
+//
+// There are two different kinds of video frame storage provided by this class:
+// 1. Memory mapped textures (aka software decoding mode).
+// Each texture is memory mapped and appears to the VideoDecodeEngine as
+// system memory.
+//
+// The usage of the textures is that the VideoDecodeEngine is performing
+// software video decoding and use them as if they are allocated in plain
+// system memory (in fact they are allocated in system memory and shared
+//     with the GPU process). An additional step of uploading the content to
+// video memory is needed. Since VideoDecodeEngine is unaware of the video
+// memory, this upload operation is performed by calling
+// ConvertToVideoFrame().
+//
+// After the content is uploaded to video memory, WebKit will see the video
+// frame as textures and will perform the necessary operations for
+// rendering.
+//
+// 2. Opaque textures (aka hardware decoding mode).
+// In this mode of operation each video frame is backed by some opaque
+// textures. This is used only when hardware video decoding is used. The
+//    textures need to be generated and allocated inside the renderer process
+// first. This will establish a translation between texture ID in the
+// renderer process and the GPU process.
+//
+//    The generated texture ID is used by IpcVideoDecodeEngine only to be
+//    sent to the GPU process. There the texture ID is translated to
+// a real texture ID inside the actual context. The real texture ID is then
+// assigned to the hardware video decoder for storing the video frame.
+//
+//    WebKit will see the video frame as normal textures and perform
+// necessary render operations.
+//
+// In both operation modes, the objective is to have WebKit see the video frames
+// as regular textures.
+//
+// THREAD SEMANTICS
+//
+// All methods of this class can be called on any thread. GLES2 context and all
+// OpenGL method calls are accessed on the Render Thread. As a result all Tasks
+// given to this object are executed on the Render Thread.
+//
+// Since this class is not refcounted, it is important to destroy objects of
+// this class only when the Task given to Destroy() is called.
+//
+class Gles2VideoDecodeContext : public media::VideoDecodeContext {
+ public:
+  // |message_loop| is the message loop of the Render Thread.
+ // |memory_mapped| determines if textures allocated are memory mapped.
+ // |context| is the graphics context for generating textures.
+ Gles2VideoDecodeContext(MessageLoop* message_loop,
+ bool memory_mapped, ggl::Context* context);
+ virtual ~Gles2VideoDecodeContext();
+
+ // media::VideoDecodeContext implementation.
+ virtual void* GetDevice();
+ virtual void AllocateVideoFrames(
+ int frames_num, size_t width, size_t height,
+ media::VideoFrame::Format format,
+ std::vector<scoped_refptr<media::VideoFrame> >* frames_out, Task* task);
+ virtual void ReleaseAllVideoFrames();
+ virtual void ConvertToVideoFrame(void* buffer,
+ scoped_refptr<media::VideoFrame> frame,
+ Task* task);
+ virtual void Destroy(Task* task);
+
+ // Accessor of the current mode of this decode context.
+ bool IsMemoryMapped() const { return memory_mapped_; }
+
+ private:
+ // Message loop for Render Thread.
+ MessageLoop* message_loop_;
+
+ // Type of storage provided by this class.
+ bool memory_mapped_;
+
+ // Pointer to the GLES2 context.
+ ggl::Context* context_;
+
+ // VideoFrames allocated.
+ std::vector<scoped_refptr<media::VideoFrame> > frames_;
+
+ DISALLOW_COPY_AND_ASSIGN(Gles2VideoDecodeContext);
+};
+
+#endif // CONTENT_RENDERER_MEDIA_GLES2_VIDEO_DECODE_CONTEXT_H_
diff --git a/content/renderer/media/ipc_video_decoder.cc b/content/renderer/media/ipc_video_decoder.cc
new file mode 100644
index 0000000..8656e6f
--- /dev/null
+++ b/content/renderer/media/ipc_video_decoder.cc
@@ -0,0 +1,207 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/ipc_video_decoder.h"
+
+#include "base/task.h"
+#include "content/common/child_process.h"
+#include "content/renderer/ggl.h"
+#include "media/base/callback.h"
+#include "media/base/filters.h"
+#include "media/base/filter_host.h"
+#include "media/base/limits.h"
+#include "media/base/media_format.h"
+#include "media/base/video_frame.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/filters/ffmpeg_interfaces.h"
+#include "media/video/video_decode_engine.h"
+
+IpcVideoDecoder::IpcVideoDecoder(MessageLoop* message_loop,
+ ggl::Context* ggl_context)
+ : decode_context_message_loop_(message_loop),
+ ggl_context_(ggl_context) {
+}
+
+IpcVideoDecoder::~IpcVideoDecoder() {
+}
+
+void IpcVideoDecoder::Initialize(media::DemuxerStream* demuxer_stream,
+ media::FilterCallback* callback,
+ media::StatisticsCallback* statsCallback) {
+  // It doesn't matter on which thread we perform initialization because
+  // all this method does is create objects and delegate the initialize
+  // message.
+
+ DCHECK(!demuxer_stream_);
+ demuxer_stream_ = demuxer_stream;
+ initialize_callback_.reset(callback);
+ statistics_callback_.reset(statsCallback);
+
+ // We require bit stream converter for hardware decoder.
+ demuxer_stream->EnableBitstreamConverter();
+
+ // Get the AVStream by querying for the provider interface.
+ media::AVStreamProvider* av_stream_provider;
+ if (!demuxer_stream->QueryInterface(&av_stream_provider)) {
+ media::VideoCodecInfo info = {0};
+ OnInitializeComplete(info);
+ return;
+ }
+
+ AVStream* av_stream = av_stream_provider->GetAVStream();
+
+ int width = av_stream->codec->coded_width;
+ int height = av_stream->codec->coded_height;
+ if (width > media::Limits::kMaxDimension ||
+ height > media::Limits::kMaxDimension ||
+ (width * height) > media::Limits::kMaxCanvas) {
+ media::VideoCodecInfo info = {0};
+ OnInitializeComplete(info);
+ return;
+ }
+
+  // Create a video decode context that associates with the graphics
+ // context.
+ decode_context_.reset(
+ ggl::CreateVideoDecodeContext(
+ ggl_context_, decode_context_message_loop_, true));
+
+ // Create a hardware video decoder handle.
+ decode_engine_.reset(ggl::CreateVideoDecodeEngine(ggl_context_));
+
+ media::VideoCodecConfig config(
+ media::CodecIDToVideoCodec(av_stream->codec->codec_id),
+ width, height,
+ av_stream->r_frame_rate.num,
+ av_stream->r_frame_rate.den,
+ av_stream->codec->extradata,
+ av_stream->codec->extradata_size);
+
+ // VideoDecodeEngine will perform initialization on the message loop
+ // given to it so it doesn't matter on which thread we are calling this.
+ decode_engine_->Initialize(ChildProcess::current()->io_message_loop(), this,
+ decode_context_.get(), config);
+}
+
+const media::MediaFormat& IpcVideoDecoder::media_format() {
+ return media_format_;
+}
+
+void IpcVideoDecoder::Stop(media::FilterCallback* callback) {
+ stop_callback_.reset(callback);
+ decode_engine_->Uninitialize();
+}
+
+void IpcVideoDecoder::Pause(media::FilterCallback* callback) {
+ // TODO(hclam): It looks like that pause is not necessary so implement this
+ // later.
+ callback->Run();
+ delete callback;
+}
+
+void IpcVideoDecoder::Flush(media::FilterCallback* callback) {
+ flush_callback_.reset(callback);
+ decode_engine_->Flush();
+}
+
+void IpcVideoDecoder::Seek(base::TimeDelta time,
+ media::FilterCallback* callback) {
+ seek_callback_.reset(callback);
+ decode_engine_->Seek();
+}
+
+void IpcVideoDecoder::OnInitializeComplete(const media::VideoCodecInfo& info) {
+ DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
+
+ if (info.success) {
+ media_format_.SetAsInteger(media::MediaFormat::kSurfaceType,
+ media::VideoFrame::TYPE_GL_TEXTURE);
+ media_format_.SetAsInteger(media::MediaFormat::kSurfaceFormat,
+ info.stream_info.surface_format);
+ media_format_.SetAsInteger(media::MediaFormat::kWidth,
+ info.stream_info.surface_width);
+ media_format_.SetAsInteger(media::MediaFormat::kHeight,
+ info.stream_info.surface_height);
+ media_format_.SetAsInteger(
+ media::MediaFormat::kSurfaceType,
+ static_cast<int>(media::VideoFrame::TYPE_GL_TEXTURE));
+ } else {
+ LOG(ERROR) << "IpcVideoDecoder initialization failed!";
+ host()->SetError(media::PIPELINE_ERROR_DECODE);
+ }
+
+ initialize_callback_->Run();
+ initialize_callback_.reset();
+}
+
+void IpcVideoDecoder::OnUninitializeComplete() {
+ DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
+
+  // After the decode engine is uninitialized we are safe to destroy the decode
+  // context. The task will add a refcount to this object so we don't need to
+  // worry about the object's lifetime.
+ decode_context_->Destroy(
+ NewRunnableMethod(this, &IpcVideoDecoder::OnDestroyComplete));
+
+ // We don't need to wait for destruction of decode context to complete because
+ // it can happen asynchronously. This object and decode context will live
+ // until the destruction task is called.
+ stop_callback_->Run();
+ stop_callback_.reset();
+}
+
+void IpcVideoDecoder::OnFlushComplete() {
+ DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
+ flush_callback_->Run();
+ flush_callback_.reset();
+}
+
+void IpcVideoDecoder::OnSeekComplete() {
+ DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
+ seek_callback_->Run();
+ seek_callback_.reset();
+}
+
+void IpcVideoDecoder::OnError() {
+ DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
+ host()->SetError(media::PIPELINE_ERROR_DECODE);
+}
+
+// This method is called by Demuxer after a demuxed packet is produced.
+void IpcVideoDecoder::OnReadComplete(media::Buffer* buffer) {
+ decode_engine_->ConsumeVideoSample(buffer);
+}
+
+void IpcVideoDecoder::OnDestroyComplete() {
+ // We don't need to do anything in this method. Destruction of objects will
+ // occur as soon as refcount goes to 0.
+}
+
+// This method is called by VideoRenderer. We delegate the method call to
+// VideoDecodeEngine.
+void IpcVideoDecoder::ProduceVideoFrame(
+ scoped_refptr<media::VideoFrame> video_frame) {
+ decode_engine_->ProduceVideoFrame(video_frame);
+}
+
+bool IpcVideoDecoder::ProvidesBuffer() {
+ return true;
+}
+
+// This method is called by VideoDecodeEngine that a video frame is produced.
+// This is then passed to VideoRenderer.
+void IpcVideoDecoder::ConsumeVideoFrame(
+ scoped_refptr<media::VideoFrame> video_frame,
+ const media::PipelineStatistics& statistics) {
+ DCHECK(video_frame);
+ statistics_callback_->Run(statistics);
+
+ VideoFrameReady(video_frame);
+}
+
+// This method is called by VideoDecodeEngine to request a video frame. The
+// request is passed to demuxer.
+void IpcVideoDecoder::ProduceVideoSample(scoped_refptr<media::Buffer> buffer) {
+ demuxer_stream_->Read(NewCallback(this, &IpcVideoDecoder::OnReadComplete));
+}
diff --git a/content/renderer/media/ipc_video_decoder.h b/content/renderer/media/ipc_video_decoder.h
new file mode 100644
index 0000000..28e9d33
--- /dev/null
+++ b/content/renderer/media/ipc_video_decoder.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
+#define CONTENT_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
+
+#include "base/time.h"
+#include "media/base/pts_heap.h"
+#include "media/base/video_frame.h"
+#include "media/filters/decoder_base.h"
+#include "media/video/video_decode_engine.h"
+#include "media/video/video_decode_context.h"
+
+struct AVRational;
+
+namespace ggl {
+class Context;
+} // namespace ggl
+
+// Video decoder that proxies decode work to the GPU process over IPC via a
+// VideoDecodeEngine. Lives in the media pipeline between the demuxer and
+// the video renderer.
+class IpcVideoDecoder : public media::VideoDecoder,
+                        public media::VideoDecodeEngine::EventHandler {
+ public:
+  IpcVideoDecoder(MessageLoop* message_loop, ggl::Context* ggl_context);
+  virtual ~IpcVideoDecoder();
+
+  // media::Filter implementation.
+  virtual void Stop(media::FilterCallback* callback);
+  virtual void Seek(base::TimeDelta time, media::FilterCallback* callback);
+  virtual void Pause(media::FilterCallback* callback);
+  virtual void Flush(media::FilterCallback* callback);
+
+  // media::VideoDecoder implementation.
+  virtual void Initialize(media::DemuxerStream* demuxer_stream,
+                          media::FilterCallback* callback,
+                          media::StatisticsCallback* statistics_callback);
+  virtual const media::MediaFormat& media_format();
+  virtual void ProduceVideoFrame(scoped_refptr<media::VideoFrame> video_frame);
+
+  // TODO(hclam): Remove this method.
+  virtual bool ProvidesBuffer();
+
+  // media::VideoDecodeEngine::EventHandler implementation.
+  virtual void OnInitializeComplete(const media::VideoCodecInfo& info);
+  virtual void OnUninitializeComplete();
+  virtual void OnFlushComplete();
+  virtual void OnSeekComplete();
+  virtual void OnError();
+
+  // TODO(hclam): Remove this method.
+  virtual void OnFormatChange(media::VideoStreamInfo stream_info) {}
+  virtual void ProduceVideoSample(scoped_refptr<media::Buffer> buffer);
+  virtual void ConsumeVideoFrame(scoped_refptr<media::VideoFrame> frame,
+                                 const media::PipelineStatistics& statistics);
+
+ private:
+  // Receives demuxed packets from |demuxer_stream_|.
+  void OnReadComplete(media::Buffer* buffer);
+  void OnDestroyComplete();
+
+  media::MediaFormat media_format_;
+
+  // Pending pipeline callbacks; each is run once and then released.
+  scoped_ptr<media::FilterCallback> flush_callback_;
+  scoped_ptr<media::FilterCallback> seek_callback_;
+  scoped_ptr<media::FilterCallback> initialize_callback_;
+  scoped_ptr<media::FilterCallback> stop_callback_;
+  scoped_ptr<media::StatisticsCallback> statistics_callback_;
+
+  // Pointer to the demuxer stream that will feed us compressed buffers.
+  scoped_refptr<media::DemuxerStream> demuxer_stream_;
+
+  // This is the message loop that we should assign to VideoDecodeContext.
+  MessageLoop* decode_context_message_loop_;
+
+  // A context for allocating textures and issuing GLES2 commands.
+  // TODO(hclam): A ggl::Context lives on the Render Thread while this object
+  // lives on the Video Decoder Thread, we need to take care of context lost
+  // and destruction of the context.
+  ggl::Context* ggl_context_;
+
+  // This VideoDecodeEngine translate our requests to IPC commands to the
+  // GPU process.
+  // VideoDecodeEngine should run on IO Thread instead of Render Thread to
+  // avoid dead lock during tear down of the media pipeline.
+  scoped_ptr<media::VideoDecodeEngine> decode_engine_;
+
+  // Decoding context to be used by VideoDecodeEngine.
+  scoped_ptr<media::VideoDecodeContext> decode_context_;
+
+  DISALLOW_COPY_AND_ASSIGN(IpcVideoDecoder);
+};
+
+#endif // CONTENT_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
diff --git a/content/renderer/p2p/ipc_network_manager.cc b/content/renderer/p2p/ipc_network_manager.cc
new file mode 100644
index 0000000..204a699
--- /dev/null
+++ b/content/renderer/p2p/ipc_network_manager.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/p2p/ipc_network_manager.h"
+
+// |socket_dispatcher| is not owned by this object (see header comment).
+IpcNetworkManager::IpcNetworkManager(P2PSocketDispatcher* socket_dispatcher)
+    : socket_dispatcher_(socket_dispatcher) {
+}
+
+// Nothing to release; the dispatcher is not owned.
+IpcNetworkManager::~IpcNetworkManager() {
+}
+
+// TODO(sergeyu): Currently this method just adds one fake network in
+// the list. This doesn't prevent PortAllocator from allocating ports:
+// browser process chooses first IPv4-enabled interface. But this
+// approach will not work in case when there is more than one active
+// network interface. Implement this properly: get list of networks
+// from the browser.
+bool IpcNetworkManager::EnumNetworks(
+    bool include_ignored, std::vector<talk_base::Network*>* networks) {
+  // NOTE(review): the Network is heap-allocated here; presumably the
+  // NetworkManager base class takes ownership of list entries -- confirm
+  // against the talk_base::NetworkManager contract.
+  networks->push_back(new talk_base::Network(
+      "chrome", "Chrome virtual network", 0, 0));
+  return true;
+}
diff --git a/content/renderer/p2p/ipc_network_manager.h b/content/renderer/p2p/ipc_network_manager.h
new file mode 100644
index 0000000..9a1bbcb
--- /dev/null
+++ b/content/renderer/p2p/ipc_network_manager.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_P2P_IPC_NETWORK_MANAGER_H_
+#define CONTENT_RENDERER_P2P_IPC_NETWORK_MANAGER_H_
+
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "third_party/libjingle/source/talk/base/network.h"
+
+class P2PSocketDispatcher;
+
+// IpcNetworkManager is a NetworkManager for libjingle that gets a
+// list of network interfaces from the browser.
+// IpcNetworkManager is a NetworkManager for libjingle that gets a
+// list of network interfaces from the browser.
+class IpcNetworkManager : public talk_base::NetworkManager {
+ public:
+  // Constructor doesn't take ownership of the |socket_dispatcher|.
+  // Marked explicit to prevent accidental implicit conversion from a
+  // dispatcher pointer.
+  explicit IpcNetworkManager(P2PSocketDispatcher* socket_dispatcher);
+  virtual ~IpcNetworkManager();
+
+ protected:
+  // Fills the supplied list with all usable networks.
+  virtual bool EnumNetworks(bool include_ignored,
+                            std::vector<talk_base::Network*>* networks)
+      OVERRIDE;
+
+  // Not owned.
+  P2PSocketDispatcher* socket_dispatcher_;
+};
+
+#endif // CONTENT_RENDERER_P2P_IPC_NETWORK_MANAGER_H_
diff --git a/content/renderer/p2p/ipc_socket_factory.cc b/content/renderer/p2p/ipc_socket_factory.cc
new file mode 100644
index 0000000..2b0b890
--- /dev/null
+++ b/content/renderer/p2p/ipc_socket_factory.cc
@@ -0,0 +1,323 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/p2p/ipc_socket_factory.h"
+
+#include "base/message_loop.h"
+#include "base/message_loop_proxy.h"
+#include "content/renderer/p2p/socket_client.h"
+#include "content/renderer/p2p/socket_dispatcher.h"
+#include "third_party/libjingle/source/talk/base/asyncpacketsocket.h"
+
+namespace {
+
+const size_t kIPv4AddressSize = 4;
+
+// Chromium and libjingle represent socket addresses differently. The
+// following two functions are used to convert addresses from one
+// representation to another.
+// Converts a Chromium net::IPEndPoint to a libjingle SocketAddress.
+// Only IPv4 is supported; returns false (with an error log) otherwise.
+bool ChromeToLibjingleSocketAddress(const net::IPEndPoint& address_chrome,
+                                    talk_base::SocketAddress* address_lj) {
+  if (address_chrome.GetFamily() != AF_INET) {
+    LOG(ERROR) << "Only IPv4 addresses are supported.";
+    return false;
+  }
+  // The 4 address bytes are stored in network order; libjingle wants a
+  // host-order uint32.
+  uint32 ip_as_int = ntohl(*reinterpret_cast<const uint32*>(
+      &address_chrome.address()[0]));
+  *address_lj = talk_base::SocketAddress(ip_as_int, address_chrome.port());
+  return true;
+}
+
+// Converts a libjingle SocketAddress (host-order IPv4) to a Chromium
+// net::IPEndPoint. Always succeeds for the addresses produced here.
+bool LibjingleToIPEndPoint(const talk_base::SocketAddress& address_lj,
+                           net::IPEndPoint* address_chrome) {
+  uint32 ip = htonl(address_lj.ip());
+  net::IPAddressNumber address;
+  address.resize(kIPv4AddressSize);
+  memcpy(&address[0], &ip, kIPv4AddressSize);
+  *address_chrome = net::IPEndPoint(address, address_lj.port());
+  return true;
+}
+
+// IpcPacketSocket implements talk_base::AsyncPacketSocket interface
+// using P2PSocketClient that works over IPC-channel. It must be used
+// on the thread it was created.
+// IpcPacketSocket implements talk_base::AsyncPacketSocket interface
+// using P2PSocketClient that works over IPC-channel. It must be used
+// on the thread it was created.
+class IpcPacketSocket : public talk_base::AsyncPacketSocket,
+                        public P2PSocketClient::Delegate {
+ public:
+  IpcPacketSocket();
+  virtual ~IpcPacketSocket();
+
+  // Starts opening the socket. Must be called exactly once, before any
+  // other method. Returns false if |address| cannot be converted.
+  bool Init(P2PSocketType type, P2PSocketClient* client,
+            const talk_base::SocketAddress& address);
+
+  // talk_base::AsyncPacketSocket interface.
+  virtual talk_base::SocketAddress GetLocalAddress(bool* allocated) const;
+  virtual talk_base::SocketAddress GetRemoteAddress() const;
+  virtual int Send(const void *pv, size_t cb);
+  virtual int SendTo(const void *pv, size_t cb,
+                     const talk_base::SocketAddress& addr);
+  virtual int Close();
+  virtual talk_base::Socket::ConnState GetState() const;
+  virtual int GetOption(talk_base::Socket::Option opt, int* value);
+  virtual int SetOption(talk_base::Socket::Option opt, int value);
+  virtual int GetError() const;
+  virtual void SetError(int error);
+
+  // P2PSocketClient::Delegate
+  virtual void OnOpen(const net::IPEndPoint& address);
+  virtual void OnError();
+  virtual void OnDataReceived(const net::IPEndPoint& address,
+                              const std::vector<char>& data);
+
+ private:
+  // Lifecycle: UNINITIALIZED -> OPENING (Init) -> OPEN (OnOpen) ->
+  // CLOSED (Close); ERROR is entered from OnOpen/OnError failures.
+  enum State {
+    STATE_UNINITIALIZED,
+    STATE_OPENING,
+    STATE_OPEN,
+    STATE_CLOSED,
+    STATE_ERROR,
+  };
+
+  // Message loop on which this socket was created and being used.
+  MessageLoop* message_loop_;
+
+  // Corresponding P2P socket client.
+  scoped_refptr<P2PSocketClient> client_;
+
+  // Local address is allocated by the browser process, and the
+  // renderer side doesn't know the address until it receives OnOpen()
+  // event from the browser.
+  talk_base::SocketAddress local_address_;
+  bool address_initialized_;
+
+  // Remote address for client TCP connections.
+  talk_base::SocketAddress remote_address_;
+
+  // Current state of the object.
+  State state_;
+
+  // Current error code. Valid when state_ == STATE_ERROR.
+  int error_;
+
+  DISALLOW_COPY_AND_ASSIGN(IpcPacketSocket);
+};
+
+// Captures the creating thread's message loop; all later calls are
+// DCHECKed against it.
+IpcPacketSocket::IpcPacketSocket()
+    : message_loop_(MessageLoop::current()),
+      address_initialized_(false),
+      state_(STATE_UNINITIALIZED), error_(0) {
+}
+
+// Ensures the underlying P2PSocketClient is closed if the socket was
+// still opening, open, or in an error state.
+IpcPacketSocket::~IpcPacketSocket() {
+  if (state_ == STATE_OPENING || state_ == STATE_OPEN ||
+      state_ == STATE_ERROR) {
+    Close();
+  }
+}
+
+// Takes a reference to |client| and asks it to open a socket of |type|.
+// The socket becomes usable only after OnOpen() is delivered. Note that
+// |client_| is assigned before the address-conversion check, so the client
+// is referenced even on the (currently unreachable) failure path.
+bool IpcPacketSocket::Init(P2PSocketType type, P2PSocketClient* client,
+                           const talk_base::SocketAddress& address) {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+  DCHECK_EQ(state_, STATE_UNINITIALIZED);
+
+  client_ = client;
+  remote_address_ = address;
+  state_ = STATE_OPENING;
+
+  net::IPEndPoint address_chrome;
+  if (!LibjingleToIPEndPoint(address, &address_chrome)) {
+    return false;
+  }
+
+  client_->Init(type, address_chrome, this,
+                base::MessageLoopProxy::CreateForCurrentThread());
+
+  return true;
+}
+
+// talk_base::AsyncPacketSocket interface.
+talk_base::SocketAddress IpcPacketSocket::GetLocalAddress(
+ bool* allocated) const {
+ DCHECK_EQ(MessageLoop::current(), message_loop_);
+
+ *allocated = address_initialized_;
+ return local_address_;
+}
+
+// Returns the address supplied to Init() (relevant for client TCP sockets).
+talk_base::SocketAddress IpcPacketSocket::GetRemoteAddress() const {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+
+  return remote_address_;
+}
+
+// Connected-send convenience wrapper: forwards to SendTo() with the
+// remote address from Init().
+int IpcPacketSocket::Send(const void *data, size_t data_size) {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+  return SendTo(data, data_size, remote_address_);
+}
+
+// Queues |data| for delivery to |address| via IPC. Returns EWOULDBLOCK
+// while the socket is still opening, ENOTCONN after Close(), the stored
+// error in the error state, and otherwise pretends the full payload was
+// sent (the IPC send is fire-and-forget).
+int IpcPacketSocket::SendTo(const void *data, size_t data_size,
+                            const talk_base::SocketAddress& address) {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+
+  switch (state_) {
+    case STATE_UNINITIALIZED:
+      NOTREACHED();
+      return EWOULDBLOCK;
+    case STATE_OPENING:
+      return EWOULDBLOCK;
+    case STATE_CLOSED:
+      return ENOTCONN;
+    case STATE_ERROR:
+      return error_;
+    case STATE_OPEN:
+      // Continue sending the packet.
+      break;
+  }
+
+  const char* data_char = reinterpret_cast<const char*>(data);
+  std::vector<char> data_vector(data_char, data_char + data_size);
+
+  net::IPEndPoint address_chrome;
+  if (!LibjingleToIPEndPoint(address, &address_chrome)) {
+    // Just drop the packet if we failed to convert the address.
+    return 0;
+  }
+
+  client_->Send(address_chrome, data_vector);
+
+  // Fake successful send. The caller ignores result anyway.
+  return data_size;
+}
+
+// Closes the underlying P2PSocketClient and moves to STATE_CLOSED.
+// Safe to call from the destructor for sockets that never fully opened.
+int IpcPacketSocket::Close() {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+
+  client_->Close();
+  state_ = STATE_CLOSED;
+
+  return 0;
+}
+
+// Maps the internal five-value State onto libjingle's three-value
+// ConnState (CONNECTING / CONNECTED / CLOSED).
+talk_base::Socket::ConnState IpcPacketSocket::GetState() const {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+
+  switch (state_) {
+    case STATE_UNINITIALIZED:
+      NOTREACHED();
+      return talk_base::Socket::CS_CONNECTING;
+
+    case STATE_OPENING:
+      return talk_base::Socket::CS_CONNECTING;
+
+    case STATE_OPEN:
+      return talk_base::Socket::CS_CONNECTED;
+
+    case STATE_CLOSED:
+    case STATE_ERROR:
+      return talk_base::Socket::CS_CLOSED;
+  }
+
+  NOTREACHED();
+  return talk_base::Socket::CS_CLOSED;
+}
+
+// Always fails: socket options are not exposed across the IPC boundary.
+int IpcPacketSocket::GetOption(talk_base::Socket::Option opt, int* value) {
+  // We don't support socket options for IPC sockets.
+  return -1;
+}
+
+// Always fails: socket options are not exposed across the IPC boundary.
+int IpcPacketSocket::SetOption(talk_base::Socket::Option opt, int value) {
+  // We don't support socket options for IPC sockets.
+  //
+  // TODO(sergeyu): Make sure we set proper socket options on the
+  // browser side.
+  return -1;
+}
+
+// Returns the last error set by OnError()/SetError(); 0 if none.
+int IpcPacketSocket::GetError() const {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+  return error_;
+}
+
+// Overrides the stored error code (does not change state_).
+void IpcPacketSocket::SetError(int error) {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+  error_ = error;
+}
+
+// P2PSocketClient::Delegate: the browser allocated and bound the socket.
+// Records the local address, marks the socket open, and then notifies
+// observers.
+void IpcPacketSocket::OnOpen(const net::IPEndPoint& address) {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+
+  if (!ChromeToLibjingleSocketAddress(address, &local_address_)) {
+    // Always expect correct IPv4 address to be allocated.
+    NOTREACHED();
+  }
+  // Update state *before* firing the signal: observers may call
+  // GetLocalAddress() or Send() from inside SignalAddressReady, and with
+  // the signal first they would see allocated == false and EWOULDBLOCK.
+  address_initialized_ = true;
+  state_ = STATE_OPEN;
+  SignalAddressReady(this, local_address_);
+}
+
+// P2PSocketClient::Delegate: the browser-side socket failed. Subsequent
+// SendTo() calls will return ECONNABORTED.
+void IpcPacketSocket::OnError() {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+  state_ = STATE_ERROR;
+  error_ = ECONNABORTED;
+}
+
+// P2PSocketClient::Delegate: an incoming packet arrived; converts the
+// sender address and forwards the payload to libjingle.
+void IpcPacketSocket::OnDataReceived(const net::IPEndPoint& address,
+                                     const std::vector<char>& data) {
+  DCHECK_EQ(MessageLoop::current(), message_loop_);
+
+  talk_base::SocketAddress address_lj;
+  if (!ChromeToLibjingleSocketAddress(address, &address_lj)) {
+    // We should always be able to convert address here because we
+    // don't expect IPv6 address on IPv4 connections.
+    NOTREACHED();
+    return;
+  }
+
+  // NOTE(review): &data[0] is undefined for an empty vector; this assumes
+  // the browser never delivers zero-length packets -- confirm.
+  SignalReadPacket(this, &data[0], data.size(), address_lj);
+}
+
+} // namespace
+
+// |socket_dispatcher| is not owned; it is handed to each P2PSocketClient
+// this factory creates.
+IpcPacketSocketFactory::IpcPacketSocketFactory(
+    P2PSocketDispatcher* socket_dispatcher)
+    : socket_dispatcher_(socket_dispatcher) {
+}
+
+// Nothing to release; the dispatcher is not owned.
+IpcPacketSocketFactory::~IpcPacketSocketFactory() {
+}
+
+// Creates a UDP packet socket backed by an IPC P2PSocketClient. Returns
+// NULL (and leaves the refcounted client to be released with |socket|) if
+// initialization fails. Ownership of the returned socket passes to the
+// caller.
+talk_base::AsyncPacketSocket* IpcPacketSocketFactory::CreateUdpSocket(
+    const talk_base::SocketAddress& local_address, int min_port, int max_port) {
+  P2PSocketClient* socket_client = new P2PSocketClient(socket_dispatcher_);
+  scoped_ptr<IpcPacketSocket> socket(new IpcPacketSocket());
+  // TODO(sergeyu): Respect local_address and port limits here (need
+  // to pass them over IPC channel to the browser).
+  if (!socket->Init(P2P_SOCKET_UDP, socket_client,
+                    talk_base::SocketAddress())) {
+    return NULL;
+  }
+
+  // Socket increments reference count if Init() was successful.
+  return socket.release();
+}
+
+// Server TCP sockets are not yet supported over IPC; always returns NULL.
+talk_base::AsyncPacketSocket* IpcPacketSocketFactory::CreateServerTcpSocket(
+    const talk_base::SocketAddress& local_address, int min_port, int max_port,
+    bool listen, bool ssl) {
+  // TODO(sergeyu): Implement this.
+  NOTIMPLEMENTED();
+  return NULL;
+}
+
+// Client TCP sockets are not yet supported over IPC; always returns NULL.
+talk_base::AsyncPacketSocket* IpcPacketSocketFactory::CreateClientTcpSocket(
+    const talk_base::SocketAddress& local_address,
+    const talk_base::SocketAddress& remote_address,
+    const talk_base::ProxyInfo& proxy_info,
+    const std::string& user_agent, bool ssl) {
+  // TODO(sergeyu): Implement this.
+  NOTIMPLEMENTED();
+  return NULL;
+}
diff --git a/content/renderer/p2p/ipc_socket_factory.h b/content/renderer/p2p/ipc_socket_factory.h
new file mode 100644
index 0000000..7f57348
--- /dev/null
+++ b/content/renderer/p2p/ipc_socket_factory.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_P2P_IPC_SOCKET_FACTORY_H_
+#define CONTENT_RENDERER_P2P_IPC_SOCKET_FACTORY_H_
+
+#include "base/basictypes.h"
+#include "third_party/libjingle/source/talk/base/packetsocketfactory.h"
+
+class P2PSocketDispatcher;
+
+// IpcPacketSocketFactory implements talk_base::PacketSocketFactory
+// interface for libjingle using IPC-based P2P sockets. The class must
+// be used on a thread that is a libjingle thread (implements
+// talk_base::Thread) and also has associated base::MessageLoop. Each
+// socket created by the factory must be used on the thread it was
+// created on.
+class IpcPacketSocketFactory : public talk_base::PacketSocketFactory {
+ public:
+ explicit IpcPacketSocketFactory(P2PSocketDispatcher* socket_dispatcher);
+ virtual ~IpcPacketSocketFactory();
+
+ virtual talk_base::AsyncPacketSocket* CreateUdpSocket(
+ const talk_base::SocketAddress& local_address,
+ int min_port, int max_port);
+ virtual talk_base::AsyncPacketSocket* CreateServerTcpSocket(
+ const talk_base::SocketAddress& local_address, int min_port, int max_port,
+ bool listen, bool ssl);
+ virtual talk_base::AsyncPacketSocket* CreateClientTcpSocket(
+ const talk_base::SocketAddress& local_address,
+ const talk_base::SocketAddress& remote_address,
+ const talk_base::ProxyInfo& proxy_info,
+ const std::string& user_agent,
+ bool ssl);
+
+ private:
+ P2PSocketDispatcher* socket_dispatcher_;
+
+ DISALLOW_COPY_AND_ASSIGN(IpcPacketSocketFactory);
+};
+
+#endif // CONTENT_RENDERER_P2P_IPC_SOCKET_FACTORY_H_
diff --git a/content/renderer/p2p/socket_client.cc b/content/renderer/p2p/socket_client.cc
new file mode 100644
index 0000000..c038c19
--- /dev/null
+++ b/content/renderer/p2p/socket_client.cc
@@ -0,0 +1,124 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/p2p/socket_client.h"
+
+#include "base/message_loop_proxy.h"
+#include "content/common/p2p_messages.h"
+#include "content/renderer/p2p/socket_dispatcher.h"
+
+// Captures the dispatcher's IPC message loop; the delegate loop is
+// supplied later, in Init().
+P2PSocketClient::P2PSocketClient(P2PSocketDispatcher* dispatcher)
+    : dispatcher_(dispatcher),
+      ipc_message_loop_(dispatcher->message_loop()),
+      delegate_message_loop_(NULL),
+      socket_id_(0), delegate_(NULL),
+      state_(STATE_UNINITIALIZED) {
+}
+
+// Verifies the socket was closed (or never opened) before the last
+// reference was dropped; see the Close() contract in the header.
+P2PSocketClient::~P2PSocketClient() {
+  DCHECK(state_ == STATE_CLOSED || state_ == STATE_UNINITIALIZED ||
+         state_ == STATE_ERROR);
+}
+
+// Registers with the dispatcher and asks the browser to create the socket.
+// May be called from any thread: the first branch trampolines the call onto
+// the IPC thread and returns immediately.
+void P2PSocketClient::Init(
+    P2PSocketType type, const net::IPEndPoint& address,
+    P2PSocketClient::Delegate* delegate,
+    scoped_refptr<base::MessageLoopProxy> delegate_loop) {
+  if (!ipc_message_loop_->BelongsToCurrentThread()) {
+    ipc_message_loop_->PostTask(
+        FROM_HERE, NewRunnableMethod(this, &P2PSocketClient::Init,
+                                     type, address, delegate, delegate_loop));
+    return;
+  }
+
+  DCHECK_EQ(state_, STATE_UNINITIALIZED);
+  state_ = STATE_OPENING;
+  delegate_ = delegate;
+  delegate_message_loop_ = delegate_loop;
+  socket_id_ = dispatcher_->RegisterClient(this);
+  dispatcher_->SendP2PMessage(
+      new P2PHostMsg_CreateSocket(0, type, socket_id_, address));
+}
+
+// Sends |data| to |address| over IPC. May be called from any thread; the
+// call is trampolined onto the IPC thread, which copies |data| into the
+// posted task.
+void P2PSocketClient::Send(const net::IPEndPoint& address,
+                           const std::vector<char>& data) {
+  if (!ipc_message_loop_->BelongsToCurrentThread()) {
+    ipc_message_loop_->PostTask(
+        FROM_HERE, NewRunnableMethod(this, &P2PSocketClient::Send, address,
+                                     data));
+    return;
+  }
+
+  // Can send data only when the socket is open.
+  DCHECK_EQ(state_, STATE_OPEN);
+  dispatcher_->SendP2PMessage(
+      new P2PHostMsg_Send(0, socket_id_, address, data));
+}
+
+// Must be called on the delegate thread. Clears |delegate_| here (so no
+// further delegate callbacks run) and finishes the close on the IPC thread.
+void P2PSocketClient::Close() {
+  DCHECK(delegate_message_loop_->BelongsToCurrentThread());
+
+  delegate_ = NULL;
+
+  ipc_message_loop_->PostTask(
+      FROM_HERE, NewRunnableMethod(this, &P2PSocketClient::DoClose));
+}
+
+// IPC-thread half of Close(): tells the browser to destroy the socket (if
+// it was ever created) and unregisters from the dispatcher. |dispatcher_|
+// may already be NULL if Detach() ran first.
+void P2PSocketClient::DoClose() {
+  if (dispatcher_) {
+    if (state_ == STATE_OPEN || state_ == STATE_OPENING) {
+      dispatcher_->SendP2PMessage(new P2PHostMsg_DestroySocket(0, socket_id_));
+    }
+    dispatcher_->UnregisterClient(socket_id_);
+  }
+
+  state_ = STATE_CLOSED;
+}
+
+// IPC-thread handler for P2PMsg_OnSocketCreated: marks the socket open
+// and relays the bound address to the delegate thread.
+void P2PSocketClient::OnSocketCreated(const net::IPEndPoint& address) {
+  DCHECK(ipc_message_loop_->BelongsToCurrentThread());
+  DCHECK_EQ(state_, STATE_OPENING);
+  state_ = STATE_OPEN;
+
+  delegate_message_loop_->PostTask(FROM_HERE, NewRunnableMethod(
+      this, &P2PSocketClient::DeliverOnSocketCreated, address));
+}
+
+// Delegate-thread proxy; |delegate_| is NULL after Close(), so the
+// callback is silently dropped then.
+void P2PSocketClient::DeliverOnSocketCreated(const net::IPEndPoint& address) {
+  if (delegate_)
+    delegate_->OnOpen(address);
+}
+
+// IPC-thread handler for P2PMsg_OnError (also invoked from Detach()):
+// enters the error state and notifies the delegate thread.
+void P2PSocketClient::OnError() {
+  DCHECK(ipc_message_loop_->BelongsToCurrentThread());
+  state_ = STATE_ERROR;
+
+  delegate_message_loop_->PostTask(FROM_HERE, NewRunnableMethod(
+      this, &P2PSocketClient::DeliverOnError));
+}
+
+// Delegate-thread proxy; dropped if the client was already closed.
+void P2PSocketClient::DeliverOnError() {
+  if (delegate_)
+    delegate_->OnError();
+}
+
+// IPC-thread handler for P2PMsg_OnDataReceived: relays the packet (copied
+// into the posted task) to the delegate thread.
+void P2PSocketClient::OnDataReceived(const net::IPEndPoint& address,
+                                     const std::vector<char>& data) {
+  DCHECK(ipc_message_loop_->BelongsToCurrentThread());
+  DCHECK_EQ(STATE_OPEN, state_);
+  delegate_message_loop_->PostTask(FROM_HERE, NewRunnableMethod(
+      this, &P2PSocketClient::DeliverOnDataReceived, address, data));
+}
+
+// Delegate-thread proxy; dropped if the client was already closed.
+void P2PSocketClient::DeliverOnDataReceived(const net::IPEndPoint& address,
+                                            const std::vector<char>& data) {
+  if (delegate_)
+    delegate_->OnDataReceived(address, data);
+}
+
+// Called by the dispatcher when it is destroyed: severs the back-pointer
+// and surfaces the loss of the IPC channel as an error.
+void P2PSocketClient::Detach() {
+  DCHECK(ipc_message_loop_->BelongsToCurrentThread());
+  dispatcher_ = NULL;
+  OnError();
+}
diff --git a/content/renderer/p2p/socket_client.h b/content/renderer/p2p/socket_client.h
new file mode 100644
index 0000000..3f0dd23
--- /dev/null
+++ b/content/renderer/p2p/socket_client.h
@@ -0,0 +1,102 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_P2P_SOCKET_CLIENT_H_
+#define CONTENT_RENDERER_P2P_SOCKET_CLIENT_H_
+
+#include <vector>
+
+#include "base/ref_counted.h"
+#include "content/common/p2p_sockets.h"
+#include "net/base/ip_endpoint.h"
+
+namespace base {
+class MessageLoopProxy;
+} // namespace base
+
+class P2PSocketDispatcher;
+
+// P2P socket that routes all calls over IPC.
+//
+// The object runs on two threads: IPC thread and delegate thread. The
+// IPC thread is used to interact with P2PSocketDispatcher. All
+// callbacks to the user of this class are called on the delegate
+// thread which is specified in Init().
+class P2PSocketClient : public base::RefCountedThreadSafe<P2PSocketClient> {
+ public:
+  // All Delegate methods are called on the delegate thread specified in
+  // Init().
+  class Delegate {
+   public:
+    virtual ~Delegate() { }
+
+    virtual void OnOpen(const net::IPEndPoint& address) = 0;
+    virtual void OnError() = 0;
+    virtual void OnDataReceived(const net::IPEndPoint& address,
+                                const std::vector<char>& data) = 0;
+  };
+
+  explicit P2PSocketClient(P2PSocketDispatcher* dispatcher);
+
+  // Initialize socket of the specified |type| and connected to the
+  // specified |address|. |address| matters only when |type| is set to
+  // P2P_SOCKET_TCP_CLIENT.
+  void Init(P2PSocketType type, const net::IPEndPoint& address,
+            Delegate* delegate,
+            scoped_refptr<base::MessageLoopProxy> delegate_loop);
+
+  // Send the |data| to the |address|.
+  void Send(const net::IPEndPoint& address, const std::vector<char>& data);
+
+  // Must be called before the socket is destroyed. The delegate is not
+  // called back once Close() has run on the delegate thread.
+  void Close();
+
+  int socket_id() const { return socket_id_; }
+
+ private:
+  // Lifecycle: UNINITIALIZED -> OPENING (Init) -> OPEN (OnSocketCreated)
+  // -> CLOSED (DoClose); ERROR is entered from OnError()/Detach().
+  enum State {
+    STATE_UNINITIALIZED,
+    STATE_OPENING,
+    STATE_OPEN,
+    STATE_CLOSED,
+    STATE_ERROR,
+  };
+
+  friend class P2PSocketDispatcher;
+
+  // Calls destructor.
+  friend class base::RefCountedThreadSafe<P2PSocketClient>;
+
+  virtual ~P2PSocketClient();
+
+  // Message handlers that run on IPC thread.
+  void OnSocketCreated(const net::IPEndPoint& address);
+  void OnError();
+  void OnDataReceived(const net::IPEndPoint& address,
+                      const std::vector<char>& data);
+
+  // Proxy methods that deliver messages to the delegate thread.
+  void DeliverOnSocketCreated(const net::IPEndPoint& address);
+  void DeliverOnError();
+  void DeliverOnDataReceived(const net::IPEndPoint& address,
+                             const std::vector<char>& data);
+
+  // Scheduled on the IPC thread to finish closing the connection.
+  void DoClose();
+
+
+  // Called by the dispatcher when it is destroyed.
+  void Detach();
+
+  // Not owned; NULLed by Detach() when the dispatcher goes away.
+  P2PSocketDispatcher* dispatcher_;
+  scoped_refptr<base::MessageLoopProxy> ipc_message_loop_;
+  scoped_refptr<base::MessageLoopProxy> delegate_message_loop_;
+  int socket_id_;
+  Delegate* delegate_;
+  State state_;
+
+  DISALLOW_COPY_AND_ASSIGN(P2PSocketClient);
+};
+
+#endif // CONTENT_RENDERER_P2P_SOCKET_CLIENT_H_
diff --git a/content/renderer/p2p/socket_dispatcher.cc b/content/renderer/p2p/socket_dispatcher.cc
new file mode 100644
index 0000000..8ea729c
--- /dev/null
+++ b/content/renderer/p2p/socket_dispatcher.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/p2p/socket_dispatcher.h"
+
+#include "base/message_loop_proxy.h"
+#include "content/common/p2p_messages.h"
+
+// Captures the creating (renderer) thread's message loop so clients can
+// post IPC work back to it.
+P2PSocketDispatcher::P2PSocketDispatcher(RenderView* render_view)
+    : RenderViewObserver(render_view),
+      message_loop_(base::MessageLoopProxy::CreateForCurrentThread()) {
+}
+
+// Detaches every still-registered client so none of them keeps a dangling
+// pointer to this dispatcher.
+P2PSocketDispatcher::~P2PSocketDispatcher() {
+  for (IDMap<P2PSocketClient>::iterator i(&clients_); !i.IsAtEnd();
+       i.Advance()) {
+    i.GetCurrentValue()->Detach();
+  }
+}
+
+// Routes incoming P2P IPC messages to the per-message handlers below.
+// Returns false for messages this observer does not handle.
+bool P2PSocketDispatcher::OnMessageReceived(const IPC::Message& message) {
+  bool handled = true;
+  IPC_BEGIN_MESSAGE_MAP(P2PSocketDispatcher, message)
+    IPC_MESSAGE_HANDLER(P2PMsg_OnSocketCreated, OnSocketCreated)
+    IPC_MESSAGE_HANDLER(P2PMsg_OnError, OnError)
+    IPC_MESSAGE_HANDLER(P2PMsg_OnDataReceived, OnDataReceived)
+    IPC_MESSAGE_UNHANDLED(handled = false)
+  IPC_END_MESSAGE_MAP()
+  return handled;
+}
+
+// Assigns and returns a socket id for |client|; used to route messages.
+int P2PSocketDispatcher::RegisterClient(P2PSocketClient* client) {
+  return clients_.Add(client);
+}
+
+// Removes the client; later messages for |id| are dropped in GetClient().
+void P2PSocketDispatcher::UnregisterClient(int id) {
+  clients_.Remove(id);
+}
+
+// Stamps |msg| with this view's routing id and sends it to the browser.
+// Takes ownership of |msg| (Send() consumes it).
+void P2PSocketDispatcher::SendP2PMessage(IPC::Message* msg) {
+  msg->set_routing_id(routing_id());
+  Send(msg);
+}
+
+// The renderer-thread loop captured at construction; clients use it as
+// their IPC message loop.
+base::MessageLoopProxy* P2PSocketDispatcher::message_loop() {
+  return message_loop_;
+}
+
+// Browser reports a socket was created and bound to |address|.
+void P2PSocketDispatcher::OnSocketCreated(
+    int socket_id, const net::IPEndPoint& address) {
+  P2PSocketClient* client = GetClient(socket_id);
+  if (client) {
+    client->OnSocketCreated(address);
+  }
+}
+
+// Browser reports a failure on the socket identified by |socket_id|.
+void P2PSocketDispatcher::OnError(int socket_id) {
+  P2PSocketClient* client = GetClient(socket_id);
+  if (client) {
+    client->OnError();
+  }
+}
+
+// Browser delivers an incoming packet for the socket |socket_id|.
+void P2PSocketDispatcher::OnDataReceived(
+    int socket_id, const net::IPEndPoint& address,
+    const std::vector<char>& data) {
+  P2PSocketClient* client = GetClient(socket_id);
+  if (client) {
+    client->OnDataReceived(address, data);
+  }
+}
+
+// Looks up the client registered under |socket_id|; NULL (with a verbose
+// log) when the socket was already closed locally.
+P2PSocketClient* P2PSocketDispatcher::GetClient(int socket_id) {
+  P2PSocketClient* client = clients_.Lookup(socket_id);
+  if (client == NULL) {
+    // This may happen if the socket was closed, but the browser side
+    // hasn't processed the close message by the time it sends the
+    // message to the renderer.
+    VLOG(1) << "Received P2P message for socket that doesn't exist.";
+    return NULL;
+  }
+
+  return client;
+}
diff --git a/content/renderer/p2p/socket_dispatcher.h b/content/renderer/p2p/socket_dispatcher.h
new file mode 100644
index 0000000..93b9da0
--- /dev/null
+++ b/content/renderer/p2p/socket_dispatcher.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// P2PSocketDispatcher is a per-renderer object that dispatches all
+// P2P messages received from the browser and relays all P2P messages
+// sent to the browser. P2PSocketClient instances register themselves
+// with the dispatcher using RegisterClient() and UnregisterClient().
+//
+// Relationship of classes.
+//
+// P2PSocketHost P2PSocketClient
+// ^ ^
+// | |
+// v IPC v
+// P2PSocketsHost <---------> P2PSocketDispatcher
+//
+
+#ifndef CONTENT_RENDERER_P2P_SOCKET_DISPATCHER_H_
+#define CONTENT_RENDERER_P2P_SOCKET_DISPATCHER_H_
+
+#include <vector>
+
+#include "base/id_map.h"
+#include "chrome/renderer/render_view_observer.h"
+#include "content/common/p2p_sockets.h"
+#include "content/renderer/p2p/socket_client.h"
+
+namespace base {
+class MessageLoopProxy;
+} // namespace base
+
+// P2PSocketDispatcher works on the renderer thread. It dispatches all
+// messages on that thread, and all its methods must be called on the
+// same thread.
+// P2PSocketDispatcher works on the renderer thread. It dispatches all
+// messages on that thread, and all its methods must be called on the
+// same thread.
+class P2PSocketDispatcher : public RenderViewObserver {
+ public:
+  explicit P2PSocketDispatcher(RenderView* render_view);
+  virtual ~P2PSocketDispatcher();
+
+  // NOTE(review): declared but no definition is visible in the
+  // accompanying .cc -- confirm it is implemented (or still needed).
+  P2PSocketClient* CreateSocket(P2PSocketType type,
+                                const net::IPEndPoint& address,
+                                P2PSocketClient::Delegate* delegate);
+
+  // RenderViewObserver overrides.
+  virtual bool OnMessageReceived(const IPC::Message& message);
+
+ private:
+  friend class P2PSocketClient;
+
+  // Called by P2PSocketClient.
+  int RegisterClient(P2PSocketClient* client);
+  void UnregisterClient(int id);
+  void SendP2PMessage(IPC::Message* msg);
+  base::MessageLoopProxy* message_loop();
+
+  // Incoming message handlers.
+  void OnSocketCreated(int socket_id, const net::IPEndPoint& address);
+  void OnError(int socket_id);
+  void OnDataReceived(int socket_id, const net::IPEndPoint& address,
+                      const std::vector<char>& data);
+
+  // Returns NULL if |socket_id| is no longer registered.
+  P2PSocketClient* GetClient(int socket_id);
+
+  // Renderer-thread loop captured at construction.
+  scoped_refptr<base::MessageLoopProxy> message_loop_;
+  // Registered clients, keyed by socket id. Entries are not owned.
+  IDMap<P2PSocketClient> clients_;
+
+  DISALLOW_COPY_AND_ASSIGN(P2PSocketDispatcher);
+};
+
+#endif // CONTENT_RENDERER_P2P_SOCKET_DISPATCHER_H_