summaryrefslogtreecommitdiffstats
path: root/content
diff options
context:
space:
mode:
authorkaren@chromium.org <karen@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-08-14 23:20:34 +0000
committerkaren@chromium.org <karen@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-08-14 23:20:34 +0000
commit4029f8d70b07a3b79cfa6d45219ecf33856ccb9f (patch)
treeedd03fc399b40b6f37bdd7a082fd0cd2143a868c /content
parent0299c75618f22dd69dff11588ddd39319307409a (diff)
downloadchromium_src-4029f8d70b07a3b79cfa6d45219ecf33856ccb9f.zip
chromium_src-4029f8d70b07a3b79cfa6d45219ecf33856ccb9f.tar.gz
chromium_src-4029f8d70b07a3b79cfa6d45219ecf33856ccb9f.tar.bz2
Merge 217276 "Add media::VideoEncodeAccelerator with WebRTC inte..."
> Add media::VideoEncodeAccelerator with WebRTC integration > > * Adds media::VideoEncodeAccelerator class. > * Add GpuVideoEncodeAccelerator{,Host} classes and appropriate IPC. > * Integrates into WebRTC stack with RTCVideoEncoderFactory/RTCVideoEncoder. > * Rename media::GpuVideoDecodeFactories -> media::GpuVideoAcceleratorFactories > and generalize for use by the encode accelerator implementations as well. > > BUG=260210 > BUG=170345 > TEST=local build, run on CrOS snow; local build, unittests on desktop Linux > > Review URL: https://chromiumcodereview.appspot.com/20632002 TBR=sheu@chromium.org Review URL: https://codereview.chromium.org/22875014 git-svn-id: svn://svn.chromium.org/chrome/branches/1599/src@217691 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content')
-rw-r--r--content/browser/renderer_host/render_process_host_impl.cc1
-rw-r--r--content/common/gpu/DEPS1
-rw-r--r--content/common/gpu/client/gpu_channel_host.cc16
-rw-r--r--content/common/gpu/client/gpu_channel_host.h5
-rw-r--r--content/common/gpu/client/gpu_video_encode_accelerator_host.cc217
-rw-r--r--content/common/gpu/client/gpu_video_encode_accelerator_host.h111
-rw-r--r--content/common/gpu/gpu_channel.cc28
-rw-r--r--content/common/gpu/gpu_channel.h9
-rw-r--r--content/common/gpu/gpu_messages.h73
-rw-r--r--content/common/gpu/media/gpu_video_encode_accelerator.cc232
-rw-r--r--content/common/gpu/media/gpu_video_encode_accelerator.h98
-rw-r--r--content/content_common.gypi4
-rw-r--r--content/content_renderer.gypi8
-rw-r--r--content/public/common/content_switches.cc3
-rw-r--r--content/public/common/content_switches.h1
-rw-r--r--content/renderer/media/media_stream_dependency_factory.cc28
-rw-r--r--content/renderer/media/renderer_gpu_video_accelerator_factories.cc (renamed from content/renderer/media/renderer_gpu_video_decoder_factories.cc)244
-rw-r--r--content/renderer/media/renderer_gpu_video_accelerator_factories.h (renamed from content/renderer/media/renderer_gpu_video_decoder_factories.h)73
-rw-r--r--content/renderer/media/rtc_video_decoder.cc12
-rw-r--r--content/renderer/media/rtc_video_decoder.h8
-rw-r--r--content/renderer/media/rtc_video_decoder_factory.cc7
-rw-r--r--content/renderer/media/rtc_video_decoder_factory.h10
-rw-r--r--content/renderer/media/rtc_video_decoder_unittest.cc10
-rw-r--r--content/renderer/media/rtc_video_encoder.cc658
-rw-r--r--content/renderer/media/rtc_video_encoder.h104
-rw-r--r--content/renderer/media/rtc_video_encoder_factory.cc110
-rw-r--r--content/renderer/media/rtc_video_encoder_factory.h48
-rw-r--r--content/renderer/media/webmediaplayer_impl.cc2
-rw-r--r--content/renderer/media/webmediaplayer_impl.h6
-rw-r--r--content/renderer/media/webmediaplayer_params.cc4
-rw-r--r--content/renderer/media/webmediaplayer_params.h9
-rw-r--r--content/renderer/render_thread_impl.cc16
-rw-r--r--content/renderer/render_thread_impl.h8
-rw-r--r--content/renderer/render_view_impl.cc4
34 files changed, 1992 insertions, 176 deletions
diff --git a/content/browser/renderer_host/render_process_host_impl.cc b/content/browser/renderer_host/render_process_host_impl.cc
index 2dd784a..e31aff2 100644
--- a/content/browser/renderer_host/render_process_host_impl.cc
+++ b/content/browser/renderer_host/render_process_host_impl.cc
@@ -943,6 +943,7 @@ void RenderProcessHostImpl::PropagateBrowserCommandLineToRenderer(
switches::kEnableWebRtcAecRecordings,
switches::kEnableWebRtcTcpServerSocket,
switches::kEnableWebRtcHWDecoding,
+ switches::kEnableWebRtcHWEncoding,
#endif
switches::kDisableWebKitMediaSource,
switches::kEnableOverscrollNotifications,
diff --git a/content/common/gpu/DEPS b/content/common/gpu/DEPS
index f42f973..7d19c09 100644
--- a/content/common/gpu/DEPS
+++ b/content/common/gpu/DEPS
@@ -3,6 +3,7 @@ include_rules = [
"+libEGL",
"+libGLESv2",
"+media/video/video_decode_accelerator.h",
+ "+media/video/video_encode_accelerator.h",
"+skia",
"+third_party/mesa",
diff --git a/content/common/gpu/client/gpu_channel_host.cc b/content/common/gpu/client/gpu_channel_host.cc
index 601c06d..2ee9f8e 100644
--- a/content/common/gpu/client/gpu_channel_host.cc
+++ b/content/common/gpu/client/gpu_channel_host.cc
@@ -13,6 +13,7 @@
#include "base/posix/eintr_wrapper.h"
#include "base/threading/thread_restrictions.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
+#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "ipc/ipc_sync_message_filter.h"
@@ -184,6 +185,21 @@ scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
return proxy->CreateVideoDecoder(profile, client).Pass();
}
+scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
+ media::VideoEncodeAccelerator::Client* client) {
+ TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
+
+ scoped_ptr<media::VideoEncodeAccelerator> vea;
+ int32 route_id = MSG_ROUTING_NONE;
+ if (!Send(new GpuChannelMsg_CreateVideoEncoder(&route_id)))
+ return vea.Pass();
+ if (route_id == MSG_ROUTING_NONE)
+ return vea.Pass();
+
+ vea.reset(new GpuVideoEncodeAcceleratorHost(client, this, route_id));
+ return vea.Pass();
+}
+
void GpuChannelHost::DestroyCommandBuffer(
CommandBufferProxyImpl* command_buffer) {
TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");
diff --git a/content/common/gpu/client/gpu_channel_host.h b/content/common/gpu/client/gpu_channel_host.h
index 6decd17..9aa3673 100644
--- a/content/common/gpu/client/gpu_channel_host.h
+++ b/content/common/gpu/client/gpu_channel_host.h
@@ -23,6 +23,7 @@
#include "ipc/ipc_channel_proxy.h"
#include "ipc/ipc_sync_channel.h"
#include "media/video/video_decode_accelerator.h"
+#include "media/video/video_encode_accelerator.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gfx/size.h"
#include "ui/gl/gpu_preference.h"
@@ -128,6 +129,10 @@ class GpuChannelHost : public IPC::Sender,
media::VideoCodecProfile profile,
media::VideoDecodeAccelerator::Client* client);
+ // Creates a video encoder in the GPU process.
+ scoped_ptr<media::VideoEncodeAccelerator> CreateVideoEncoder(
+ media::VideoEncodeAccelerator::Client* client);
+
// Destroy a command buffer created by this channel.
void DestroyCommandBuffer(CommandBufferProxyImpl* command_buffer);
diff --git a/content/common/gpu/client/gpu_video_encode_accelerator_host.cc b/content/common/gpu/client/gpu_video_encode_accelerator_host.cc
new file mode 100644
index 0000000..64f07e5
--- /dev/null
+++ b/content/common/gpu/client/gpu_video_encode_accelerator_host.cc
@@ -0,0 +1,217 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
+
+#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "content/common/gpu/client/gpu_channel_host.h"
+#include "content/common/gpu/gpu_messages.h"
+#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
+#include "media/base/video_frame.h"
+
+namespace content {
+
+GpuVideoEncodeAcceleratorHost::GpuVideoEncodeAcceleratorHost(
+ media::VideoEncodeAccelerator::Client* client,
+ const scoped_refptr<GpuChannelHost>& gpu_channel_host,
+ int32 route_id)
+ : client_(client),
+ client_ptr_factory_(client_),
+ channel_(gpu_channel_host),
+ route_id_(route_id),
+ next_frame_id_(0) {
+ channel_->AddRoute(route_id_, AsWeakPtr());
+}
+
+GpuVideoEncodeAcceleratorHost::~GpuVideoEncodeAcceleratorHost() {
+ if (channel_)
+ channel_->RemoveRoute(route_id_);
+}
+
+// static
+std::vector<media::VideoEncodeAccelerator::SupportedProfile>
+GpuVideoEncodeAcceleratorHost::GetSupportedProfiles() {
+ return GpuVideoEncodeAccelerator::GetSupportedProfiles();
+}
+
+bool GpuVideoEncodeAcceleratorHost::OnMessageReceived(
+ const IPC::Message& message) {
+ bool handled = true;
+ IPC_BEGIN_MESSAGE_MAP(GpuVideoEncodeAcceleratorHost, message)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderHostMsg_NotifyInitializeDone,
+ OnNotifyInitializeDone)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderHostMsg_RequireBitstreamBuffers,
+ OnRequireBitstreamBuffers)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderHostMsg_NotifyInputDone,
+ OnNotifyInputDone)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderHostMsg_BitstreamBufferReady,
+ OnBitstreamBufferReady)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderHostMsg_NotifyError,
+ OnNotifyError)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ DCHECK(handled);
+ return handled;
+}
+
+void GpuVideoEncodeAcceleratorHost::OnChannelError() {
+ DLOG(ERROR) << "OnChannelError()";
+ OnNotifyError(kPlatformFailureError);
+ if (channel_) {
+ channel_->RemoveRoute(route_id_);
+ channel_ = NULL;
+ }
+}
+
+void GpuVideoEncodeAcceleratorHost::Initialize(
+ media::VideoFrame::Format input_format,
+ const gfx::Size& input_visible_size,
+ media::VideoCodecProfile output_profile,
+ uint32 initial_bitrate) {
+ Send(new AcceleratedVideoEncoderMsg_Initialize(route_id_,
+ input_format,
+ input_visible_size,
+ output_profile,
+ initial_bitrate));
+}
+
+void GpuVideoEncodeAcceleratorHost::Encode(
+ const scoped_refptr<media::VideoFrame>& frame,
+ bool force_keyframe) {
+ if (!channel_)
+ return;
+ if (!base::SharedMemory::IsHandleValid(frame->shared_memory_handle())) {
+ DLOG(ERROR) << "Encode(): cannot encode frame not backed by shared memory";
+ NotifyError(kPlatformFailureError);
+ return;
+ }
+ base::SharedMemoryHandle handle =
+ channel_->ShareToGpuProcess(frame->shared_memory_handle());
+ if (!base::SharedMemory::IsHandleValid(handle)) {
+ DLOG(ERROR) << "Encode(): failed to duplicate buffer handle for GPU "
+ "process";
+ NotifyError(kPlatformFailureError);
+ return;
+ }
+
+ // We assume that planar frame data passed here is packed and contiguous.
+ const size_t plane_count = media::VideoFrame::NumPlanes(frame->format());
+ size_t frame_size = 0;
+ for (size_t i = 0; i < plane_count; ++i) {
+ // Cast DCHECK parameters to void* to avoid printing uint8* as a string.
+ DCHECK_EQ(reinterpret_cast<void*>(frame->data(i)),
+ reinterpret_cast<void*>((frame->data(0) + frame_size)))
+ << "plane=" << i;
+ frame_size += frame->stride(i) * frame->rows(i);
+ }
+
+ Send(new AcceleratedVideoEncoderMsg_Encode(
+ route_id_, next_frame_id_, handle, frame_size, force_keyframe));
+ frame_map_[next_frame_id_] = frame;
+
+ // Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
+ next_frame_id_ = (next_frame_id_ + 1) & 0x3FFFFFFF;
+}
+
+void GpuVideoEncodeAcceleratorHost::UseOutputBitstreamBuffer(
+ const media::BitstreamBuffer& buffer) {
+ if (!channel_)
+ return;
+ base::SharedMemoryHandle handle =
+ channel_->ShareToGpuProcess(buffer.handle());
+ if (!base::SharedMemory::IsHandleValid(handle)) {
+ DLOG(ERROR) << "UseOutputBitstreamBuffer(): failed to duplicate buffer "
+ "handle for GPU process: buffer.id()=" << buffer.id();
+ NotifyError(kPlatformFailureError);
+ return;
+ }
+ Send(new AcceleratedVideoEncoderMsg_UseOutputBitstreamBuffer(
+ route_id_, buffer.id(), handle, buffer.size()));
+}
+
+void GpuVideoEncodeAcceleratorHost::RequestEncodingParametersChange(
+ uint32 bitrate,
+ uint32 framerate) {
+ Send(new AcceleratedVideoEncoderMsg_RequestEncodingParametersChange(
+ route_id_, bitrate, framerate));
+}
+
+void GpuVideoEncodeAcceleratorHost::Destroy() {
+ Send(new GpuChannelMsg_DestroyVideoEncoder(route_id_));
+ delete this;
+}
+
+void GpuVideoEncodeAcceleratorHost::NotifyError(Error error) {
+ DVLOG(2) << "NotifyError(): error=" << error;
+ base::MessageLoopProxy::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&media::VideoEncodeAccelerator::Client::NotifyError,
+ client_ptr_factory_.GetWeakPtr(),
+ error));
+}
+
+void GpuVideoEncodeAcceleratorHost::OnNotifyInitializeDone() {
+ DVLOG(2) << "OnNotifyInitializeDone()";
+ if (client_)
+ client_->NotifyInitializeDone();
+}
+
+void GpuVideoEncodeAcceleratorHost::OnRequireBitstreamBuffers(
+ uint32 input_count,
+ const gfx::Size& input_coded_size,
+ uint32 output_buffer_size) {
+ DVLOG(2) << "OnRequireBitstreamBuffers(): input_count=" << input_count
+ << ", input_coded_size=" << input_coded_size.ToString()
+ << ", output_buffer_size=" << output_buffer_size;
+ if (client_) {
+ client_->RequireBitstreamBuffers(
+ input_count, input_coded_size, output_buffer_size);
+ }
+}
+
+void GpuVideoEncodeAcceleratorHost::OnNotifyInputDone(int32 frame_id) {
+ DVLOG(3) << "OnNotifyInputDone(): frame_id=" << frame_id;
+ if (!frame_map_.erase(frame_id)) {
+ DLOG(ERROR) << "OnNotifyInputDone(): "
+ "invalid frame_id=" << frame_id;
+ OnNotifyError(kPlatformFailureError);
+ return;
+ }
+}
+
+void GpuVideoEncodeAcceleratorHost::OnBitstreamBufferReady(
+ int32 bitstream_buffer_id,
+ uint32 payload_size,
+ bool key_frame) {
+ DVLOG(3) << "OnBitstreamBufferReady(): "
+ "bitstream_buffer_id=" << bitstream_buffer_id
+ << ", payload_size=" << payload_size
+ << ", key_frame=" << key_frame;
+ if (client_)
+ client_->BitstreamBufferReady(bitstream_buffer_id, payload_size, key_frame);
+}
+
+void GpuVideoEncodeAcceleratorHost::OnNotifyError(Error error) {
+ DVLOG(2) << "OnNotifyError(): error=" << error;
+ if (client_) {
+ client_->NotifyError(error);
+ client_ = NULL;
+ client_ptr_factory_.InvalidateWeakPtrs();
+ }
+}
+
+void GpuVideoEncodeAcceleratorHost::Send(IPC::Message* message) {
+ if (!channel_) {
+ DLOG(ERROR) << "Send(): no channel";
+ delete message;
+ NotifyError(kPlatformFailureError);
+ } else if (!channel_->Send(message)) {
+ DLOG(ERROR) << "Send(): sending failed: message->type()="
+ << message->type();
+ NotifyError(kPlatformFailureError);
+ }
+}
+
+} // namespace content
diff --git a/content/common/gpu/client/gpu_video_encode_accelerator_host.h b/content/common/gpu/client/gpu_video_encode_accelerator_host.h
new file mode 100644
index 0000000..860955d
--- /dev/null
+++ b/content/common/gpu/client/gpu_video_encode_accelerator_host.h
@@ -0,0 +1,111 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_CLIENT_GPU_VIDEO_ENCODE_ACCELERATOR_HOST_H_
+#define CONTENT_COMMON_GPU_CLIENT_GPU_VIDEO_ENCODE_ACCELERATOR_HOST_H_
+
+#include <vector>
+
+#include "base/containers/hash_tables.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "ipc/ipc_listener.h"
+#include "media/video/video_encode_accelerator.h"
+
+namespace gfx {
+
+class Size;
+
+} // namespace gfx
+
+namespace media {
+
+class VideoFrame;
+
+} // namespace media
+
+namespace content {
+
+class GpuChannelHost;
+
+// This class is the renderer-side host for the VideoEncodeAccelerator in the
+// GPU process, coordinated over IPC.
+class GpuVideoEncodeAcceleratorHost
+ : public IPC::Listener,
+ public media::VideoEncodeAccelerator,
+ public base::SupportsWeakPtr<GpuVideoEncodeAcceleratorHost> {
+ public:
+ // |client| is assumed to outlive this object. Since the GpuChannelHost does
+ // _not_ own this object, a reference to |gpu_channel_host| is taken.
+ GpuVideoEncodeAcceleratorHost(
+ media::VideoEncodeAccelerator::Client* client,
+ const scoped_refptr<GpuChannelHost>& gpu_channel_host,
+ int32 route_id);
+ virtual ~GpuVideoEncodeAcceleratorHost();
+
+ // Static query for the supported profiles. This query proxies to
+ // GpuVideoEncodeAccelerator::GetSupportedProfiles().
+ static std::vector<SupportedProfile> GetSupportedProfiles();
+
+ // IPC::Listener implementation.
+ virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
+ virtual void OnChannelError() OVERRIDE;
+
+ // media::VideoEncodeAccelerator implementation.
+ virtual void Initialize(media::VideoFrame::Format format,
+ const gfx::Size& input_visible_size,
+ media::VideoCodecProfile output_profile,
+ uint32 initial_bitrate) OVERRIDE;
+ virtual void Encode(const scoped_refptr<media::VideoFrame>& frame,
+ bool force_keyframe) OVERRIDE;
+ virtual void UseOutputBitstreamBuffer(
+ const media::BitstreamBuffer& buffer) OVERRIDE;
+ virtual void RequestEncodingParametersChange(uint32 bitrate,
+ uint32 framerate_num) OVERRIDE;
+ virtual void Destroy() OVERRIDE;
+
+ private:
+  // Notify |client_| when an error has occurred. Used when notifying from
+ // within a media::VideoEncodeAccelerator entry point, to avoid re-entrancy.
+ void NotifyError(Error error);
+
+ // IPC handlers, proxying media::VideoEncodeAccelerator::Client for the GPU
+ // process.
+ void OnNotifyInitializeDone();
+ void OnRequireBitstreamBuffers(uint32 input_count,
+ const gfx::Size& input_coded_size,
+ uint32 output_buffer_size);
+ void OnNotifyInputDone(int32 frame_id);
+ void OnBitstreamBufferReady(int32 bitstream_buffer_id,
+ uint32 payload_size,
+ bool key_frame);
+ void OnNotifyError(Error error);
+
+ void Send(IPC::Message* message);
+
+ // Weak pointer for client callbacks on the
+ // media::VideoEncodeAccelerator::Client interface.
+ media::VideoEncodeAccelerator::Client* client_;
+ // |client_ptr_factory_| is used for callbacks that need to be done through
+ // a PostTask() to avoid re-entrancy on the client.
+ base::WeakPtrFactory<VideoEncodeAccelerator::Client> client_ptr_factory_;
+
+ // IPC channel and route ID.
+ scoped_refptr<GpuChannelHost> channel_;
+ const int32 route_id_;
+
+ // media::VideoFrames sent to the encoder.
+ // base::IDMap not used here, since that takes pointers, not scoped_refptr.
+ typedef base::hash_map<int32, scoped_refptr<media::VideoFrame> > FrameMap;
+ FrameMap frame_map_;
+
+ // ID serial number for the next frame to send to the GPU process.
+ int32 next_frame_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuVideoEncodeAcceleratorHost);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_CLIENT_GPU_VIDEO_ENCODE_ACCELERATOR_HOST_H_
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc
index 2e26fac..2b920d3 100644
--- a/content/common/gpu/gpu_channel.cc
+++ b/content/common/gpu/gpu_channel.cc
@@ -20,6 +20,7 @@
#include "base/timer/timer.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
+#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "crypto/hmac.h"
@@ -754,9 +755,12 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
- OnCreateOffscreenCommandBuffer)
+ OnCreateOffscreenCommandBuffer)
IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
- OnDestroyCommandBuffer)
+ OnDestroyCommandBuffer)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoEncoder, OnCreateVideoEncoder)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoEncoder,
+ OnDestroyVideoEncoder)
#if defined(OS_ANDROID)
IPC_MESSAGE_HANDLER(GpuChannelMsg_RegisterStreamTextureProxy,
OnRegisterStreamTextureProxy)
@@ -899,6 +903,26 @@ void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
}
}
+void GpuChannel::OnCreateVideoEncoder(int32* route_id) {
+ TRACE_EVENT0("gpu", "GpuChannel::OnCreateVideoEncoder");
+
+ *route_id = GenerateRouteID();
+ GpuVideoEncodeAccelerator* encoder =
+ new GpuVideoEncodeAccelerator(this, *route_id);
+ router_.AddRoute(*route_id, encoder);
+ video_encoders_.AddWithID(encoder, *route_id);
+}
+
+void GpuChannel::OnDestroyVideoEncoder(int32 route_id) {
+ TRACE_EVENT1(
+ "gpu", "GpuChannel::OnDestroyVideoEncoder", "route_id", route_id);
+ GpuVideoEncodeAccelerator* encoder = video_encoders_.Lookup(route_id);
+ if (!encoder)
+ return;
+ router_.RemoveRoute(route_id);
+ video_encoders_.Remove(route_id);
+}
+
#if defined(OS_ANDROID)
void GpuChannel::OnRegisterStreamTextureProxy(
int32 stream_id, int32* route_id) {
diff --git a/content/common/gpu/gpu_channel.h b/content/common/gpu/gpu_channel.h
index 8d775f0..0692b90 100644
--- a/content/common/gpu/gpu_channel.h
+++ b/content/common/gpu/gpu_channel.h
@@ -11,6 +11,7 @@
#include "base/id_map.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
#include "base/memory/weak_ptr.h"
#include "base/process/process.h"
#include "build/build_config.h"
@@ -49,9 +50,10 @@ class StreamTextureManagerAndroid;
namespace content {
class GpuChannelManager;
+class GpuChannelMessageFilter;
struct GpuRenderingStats;
+class GpuVideoEncodeAccelerator;
class GpuWatchdog;
-class GpuChannelMessageFilter;
// Encapsulates an IPC channel between the GPU process and one renderer
// process. On the renderer side there's a corresponding GpuChannelHost.
@@ -173,6 +175,8 @@ class GpuChannel : public IPC::Listener,
const GPUCreateCommandBufferConfig& init_params,
int32* route_id);
void OnDestroyCommandBuffer(int32 route_id);
+ void OnCreateVideoEncoder(int32* route_id);
+ void OnDestroyVideoEncoder(int32 route_id);
#if defined(OS_ANDROID)
// Register the StreamTextureProxy class with the gpu process so that all
@@ -238,6 +242,9 @@ class GpuChannel : public IPC::Listener,
StubMap stubs_;
#endif // defined (ENABLE_GPU)
+ typedef IDMap<GpuVideoEncodeAccelerator, IDMapOwnPointer> EncoderMap;
+ EncoderMap video_encoders_;
+
bool log_messages_; // True if we should log sent and received messages.
gpu::gles2::DisallowedFeatures disallowed_features_;
GpuWatchdog* watchdog_;
diff --git a/content/common/gpu/gpu_messages.h b/content/common/gpu/gpu_messages.h
index 5749c37..49493d9 100644
--- a/content/common/gpu/gpu_messages.h
+++ b/content/common/gpu/gpu_messages.h
@@ -23,7 +23,9 @@
#include "gpu/ipc/gpu_command_buffer_traits.h"
#include "ipc/ipc_channel_handle.h"
#include "ipc/ipc_message_macros.h"
+#include "media/base/video_frame.h"
#include "media/video/video_decode_accelerator.h"
+#include "media/video/video_encode_accelerator.h"
#include "ui/base/latency_info.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gfx/size.h"
@@ -217,6 +219,10 @@ IPC_STRUCT_TRAITS_BEGIN(content::GpuRenderingStats)
IPC_STRUCT_TRAITS_MEMBER(total_processing_commands_time)
IPC_STRUCT_TRAITS_END()
+IPC_ENUM_TRAITS(media::VideoFrame::Format)
+
+IPC_ENUM_TRAITS(media::VideoEncodeAccelerator::Error)
+
//------------------------------------------------------------------------------
// GPU Messages
// These are messages from the browser to the GPU process.
@@ -447,6 +453,12 @@ IPC_MESSAGE_CONTROL1(GpuChannelMsg_GenerateMailboxNamesAsync,
IPC_MESSAGE_CONTROL1(GpuChannelMsg_GenerateMailboxNamesReply,
std::vector<gpu::Mailbox> /* mailbox_names */)
+// Create a new GPU-accelerated video encoder.
+IPC_SYNC_MESSAGE_CONTROL0_1(GpuChannelMsg_CreateVideoEncoder,
+ int32 /* route_id */)
+
+IPC_MESSAGE_CONTROL1(GpuChannelMsg_DestroyVideoEncoder, int32 /* route_id */)
+
#if defined(OS_ANDROID)
// Register the StreamTextureProxy class with the GPU process, so that
// the renderer process will get notified whenever a frame becomes available.
@@ -690,3 +702,64 @@ IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderHostMsg_ResetDone)
// Video decoder has encountered an error.
IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderHostMsg_ErrorNotification,
uint32) /* Error ID */
+
+//------------------------------------------------------------------------------
+// Accelerated Video Encoder Messages
+// These messages are sent from the Renderer process to GPU process.
+
+// Initialize the accelerated encoder.
+IPC_MESSAGE_ROUTED4(AcceleratedVideoEncoderMsg_Initialize,
+ media::VideoFrame::Format /* input_format */,
+ gfx::Size /* input_visible_size */,
+ media::VideoCodecProfile /* output_profile */,
+ uint32 /* initial_bitrate */)
+
+// Queue an input buffer to the encoder to encode. |frame_id| will be returned
+// by AcceleratedVideoEncoderHostMsg_NotifyInputDone.
+IPC_MESSAGE_ROUTED4(AcceleratedVideoEncoderMsg_Encode,
+ int32 /* frame_id */,
+ base::SharedMemoryHandle /* buffer_handle */,
+ uint32 /* buffer_size */,
+ bool /* force_keyframe */)
+
+// Queue a buffer to the encoder for use in returning output. |buffer_id| will
+// be returned by AcceleratedVideoEncoderHostMsg_BitstreamBufferReady.
+IPC_MESSAGE_ROUTED3(AcceleratedVideoEncoderMsg_UseOutputBitstreamBuffer,
+ int32 /* buffer_id */,
+ base::SharedMemoryHandle /* buffer_handle */,
+ uint32 /* buffer_size */)
+
+// Request a runtime encoding parameter change.
+IPC_MESSAGE_ROUTED2(AcceleratedVideoEncoderMsg_RequestEncodingParametersChange,
+ uint32 /* bitrate */,
+ uint32 /* framerate */)
+
+//------------------------------------------------------------------------------
+// Accelerated Video Encoder Host Messages
+// These messages are sent from GPU process to Renderer process.
+
+// Notify of the completion of initialization.
+IPC_MESSAGE_ROUTED0(AcceleratedVideoEncoderHostMsg_NotifyInitializeDone)
+
+// Notify renderer of the input/output buffer requirements of the encoder.
+IPC_MESSAGE_ROUTED3(AcceleratedVideoEncoderHostMsg_RequireBitstreamBuffers,
+ uint32 /* input_count */,
+ gfx::Size /* input_coded_size */,
+ uint32 /* output_buffer_size */)
+
+// Notify the renderer that the encoder has finished using an input buffer.
+// There is no congruent entry point in the media::VideoEncodeAccelerator
+// interface, in VEA this same done condition is indicated by dropping the
+// reference to the media::VideoFrame passed to VEA::Encode().
+IPC_MESSAGE_ROUTED1(AcceleratedVideoEncoderHostMsg_NotifyInputDone,
+ int32 /* frame_id */)
+
+// Notify the renderer that an output buffer has been filled with encoded data.
+IPC_MESSAGE_ROUTED3(AcceleratedVideoEncoderHostMsg_BitstreamBufferReady,
+ int32 /* bitstream_buffer_id */,
+ uint32 /* payload_size */,
+ bool /* key_frame */)
+
+// Report error condition.
+IPC_MESSAGE_ROUTED1(AcceleratedVideoEncoderHostMsg_NotifyError,
+ media::VideoEncodeAccelerator::Error /* error */)
diff --git a/content/common/gpu/media/gpu_video_encode_accelerator.cc b/content/common/gpu/media/gpu_video_encode_accelerator.cc
new file mode 100644
index 0000000..b15f04b
--- /dev/null
+++ b/content/common/gpu/media/gpu_video_encode_accelerator.cc
@@ -0,0 +1,232 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
+
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "content/common/gpu/gpu_channel.h"
+#include "content/common/gpu/gpu_messages.h"
+#include "ipc/ipc_message_macros.h"
+#include "media/base/video_frame.h"
+
+namespace content {
+
+GpuVideoEncodeAccelerator::GpuVideoEncodeAccelerator(GpuChannel* gpu_channel,
+ int32 route_id)
+ : weak_this_factory_(this),
+ channel_(gpu_channel),
+ route_id_(route_id),
+ input_format_(media::VideoFrame::INVALID),
+ output_buffer_size_(0) {}
+
+GpuVideoEncodeAccelerator::~GpuVideoEncodeAccelerator() {
+ if (encoder_)
+ encoder_.release()->Destroy();
+}
+
+bool GpuVideoEncodeAccelerator::OnMessageReceived(const IPC::Message& message) {
+ bool handled = true;
+ IPC_BEGIN_MESSAGE_MAP(GpuVideoEncodeAccelerator, message)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderMsg_Initialize, OnInitialize)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderMsg_Encode, OnEncode)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderMsg_UseOutputBitstreamBuffer,
+ OnUseOutputBitstreamBuffer)
+ IPC_MESSAGE_HANDLER(
+ AcceleratedVideoEncoderMsg_RequestEncodingParametersChange,
+ OnRequestEncodingParametersChange)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ return handled;
+}
+
+void GpuVideoEncodeAccelerator::OnChannelError() {
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ if (channel_)
+ channel_ = NULL;
+}
+
+void GpuVideoEncodeAccelerator::NotifyInitializeDone() {
+ Send(new AcceleratedVideoEncoderHostMsg_NotifyInitializeDone(route_id_));
+}
+
+void GpuVideoEncodeAccelerator::RequireBitstreamBuffers(
+ unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) {
+ Send(new AcceleratedVideoEncoderHostMsg_RequireBitstreamBuffers(
+ route_id_, input_count, input_coded_size, output_buffer_size));
+ input_coded_size_ = input_coded_size;
+ output_buffer_size_ = output_buffer_size;
+}
+
+void GpuVideoEncodeAccelerator::BitstreamBufferReady(int32 bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame) {
+ Send(new AcceleratedVideoEncoderHostMsg_BitstreamBufferReady(
+ route_id_, bitstream_buffer_id, payload_size, key_frame));
+}
+
+void GpuVideoEncodeAccelerator::NotifyError(
+ media::VideoEncodeAccelerator::Error error) {
+ Send(new AcceleratedVideoEncoderHostMsg_NotifyError(route_id_, error));
+}
+
+// static
+std::vector<media::VideoEncodeAccelerator::SupportedProfile>
+GpuVideoEncodeAccelerator::GetSupportedProfiles() {
+ std::vector<media::VideoEncodeAccelerator::SupportedProfile> profiles;
+
+ // TODO(sheu): return platform-specific profiles.
+ return profiles;
+}
+
+void GpuVideoEncodeAccelerator::CreateEncoder() {
+  // TODO(sheu): actually create the encoder.
+}
+
+void GpuVideoEncodeAccelerator::OnInitialize(
+ media::VideoFrame::Format input_format,
+ const gfx::Size& input_visible_size,
+ media::VideoCodecProfile output_profile,
+ uint32 initial_bitrate) {
+ DVLOG(2) << "GpuVideoEncodeAccelerator::OnInitialize(): "
+ "input_format=" << input_format
+ << ", input_visible_size=" << input_visible_size.ToString()
+ << ", output_profile=" << output_profile
+ << ", initial_bitrate=" << initial_bitrate;
+ DCHECK(!encoder_);
+
+ if (input_visible_size.width() > kint32max / input_visible_size.height()) {
+ DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnInitialize(): "
+ "input_visible_size too large";
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+
+ CreateEncoder();
+ if (!encoder_) {
+ DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnInitialize(): VEA creation "
+ "failed";
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+ encoder_->Initialize(
+ input_format, input_visible_size, output_profile, initial_bitrate);
+ input_format_ = input_format;
+ input_visible_size_ = input_visible_size;
+}
+
+void GpuVideoEncodeAccelerator::OnEncode(int32 frame_id,
+ base::SharedMemoryHandle buffer_handle,
+ uint32 buffer_size,
+ bool force_keyframe) {
+ DVLOG(3) << "GpuVideoEncodeAccelerator::OnEncode(): frame_id=" << frame_id
+ << ", buffer_size=" << buffer_size
+ << ", force_keyframe=" << force_keyframe;
+ if (!encoder_)
+ return;
+ if (frame_id < 0) {
+ DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): invalid frame_id="
+ << frame_id;
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+
+ scoped_ptr<base::SharedMemory> shm(
+ new base::SharedMemory(buffer_handle, true));
+ if (!shm->Map(buffer_size)) {
+ DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): "
+ "could not map frame_id=" << frame_id;
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+
+ scoped_refptr<media::VideoFrame> frame =
+ media::VideoFrame::WrapExternalSharedMemory(
+ media::VideoFrame::I420,
+ input_coded_size_,
+ gfx::Rect(input_visible_size_),
+ input_visible_size_,
+ reinterpret_cast<uint8*>(shm->memory()),
+ buffer_handle,
+ base::TimeDelta(),
+ // It's turtles all the way down...
+ base::Bind(base::IgnoreResult(&base::MessageLoopProxy::PostTask),
+ base::MessageLoopProxy::current(),
+ FROM_HERE,
+ base::Bind(&GpuVideoEncodeAccelerator::EncodeFrameFinished,
+ weak_this_factory_.GetWeakPtr(),
+ frame_id,
+ base::Passed(&shm))));
+
+ if (!frame) {
+ DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): "
+ "could not create VideoFrame for frame_id=" << frame_id;
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+
+ encoder_->Encode(frame, force_keyframe);
+}
+
+void GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(
+ int32 buffer_id,
+ base::SharedMemoryHandle buffer_handle,
+ uint32 buffer_size) {
+ DVLOG(3) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
+ "buffer_id=" << buffer_id
+ << ", buffer_size=" << buffer_size;
+ if (!encoder_)
+ return;
+ if (buffer_id < 0) {
+ DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
+ "invalid buffer_id=" << buffer_id;
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+ if (buffer_size < output_buffer_size_) {
+ DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
+ "buffer too small for buffer_id=" << buffer_id;
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+ encoder_->UseOutputBitstreamBuffer(
+ media::BitstreamBuffer(buffer_id, buffer_handle, buffer_size));
+}
+
+void GpuVideoEncodeAccelerator::OnRequestEncodingParametersChange(
+ uint32 bitrate,
+ uint32 framerate) {
+ DVLOG(2) << "GpuVideoEncodeAccelerator::OnRequestEncodingParametersChange(): "
+ "bitrate=" << bitrate
+ << ", framerate=" << framerate;
+ if (!encoder_)
+ return;
+ encoder_->RequestEncodingParametersChange(bitrate, framerate);
+}
+
+void GpuVideoEncodeAccelerator::EncodeFrameFinished(
+ int32 frame_id,
+ scoped_ptr<base::SharedMemory> shm) {
+ Send(new AcceleratedVideoEncoderHostMsg_NotifyInputDone(route_id_, frame_id));
+ // Just let shm fall out of scope.
+}
+
+void GpuVideoEncodeAccelerator::Send(IPC::Message* message) {
+ if (!channel_) {
+ DLOG(ERROR) << "GpuVideoEncodeAccelerator::Send(): no channel";
+ delete message;
+ return;
+ } else if (!channel_->Send(message)) {
+ DLOG(ERROR) << "GpuVideoEncodeAccelerator::Send(): sending failed: "
+ "message->type()=" << message->type();
+ NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+}
+
+} // namespace content
diff --git a/content/common/gpu/media/gpu_video_encode_accelerator.h b/content/common/gpu/media/gpu_video_encode_accelerator.h
new file mode 100644
index 0000000..5a48295
--- /dev/null
+++ b/content/common/gpu/media/gpu_video_encode_accelerator.h
@@ -0,0 +1,98 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ENCODE_ACCELERATOR_H_
+#define CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ENCODE_ACCELERATOR_H_
+
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "ipc/ipc_listener.h"
+#include "media/video/video_encode_accelerator.h"
+#include "ui/gfx/size.h"
+
+namespace base {
+
+class SharedMemory;
+
+} // namespace base
+
+namespace content {
+
+class GpuChannel;
+
+// This class encapsulates the GPU process view of a VideoEncodeAccelerator,
+// wrapping the platform-specific VideoEncodeAccelerator instance. It handles
+// IPC coming in from the renderer and passes it to the underlying VEA.
+class GpuVideoEncodeAccelerator : public IPC::Listener,
+ public media::VideoEncodeAccelerator::Client {
+ public:
+ GpuVideoEncodeAccelerator(GpuChannel* gpu_channel, int32 route_id);
+ virtual ~GpuVideoEncodeAccelerator();
+
+ // IPC::Listener implementation
+ virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
+ virtual void OnChannelError() OVERRIDE;
+
+ // media::VideoEncodeAccelerator::Client implementation.
+ virtual void NotifyInitializeDone() OVERRIDE;
+ virtual void RequireBitstreamBuffers(unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) OVERRIDE;
+ virtual void BitstreamBufferReady(int32 bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame) OVERRIDE;
+ virtual void NotifyError(media::VideoEncodeAccelerator::Error error) OVERRIDE;
+
+ // Static query for supported profiles. This query calls the appropriate
+ // platform-specific version.
+ static std::vector<media::VideoEncodeAccelerator::SupportedProfile>
+ GetSupportedProfiles();
+
+ private:
+ // Create the appropriate platform-specific VEA.
+ void CreateEncoder();
+
+ // IPC handlers, proxying media::VideoEncodeAccelerator for the renderer
+ // process.
+ void OnInitialize(media::VideoFrame::Format input_format,
+ const gfx::Size& input_visible_size,
+ media::VideoCodecProfile output_profile,
+ uint32 initial_bitrate);
+ void OnEncode(int32 frame_id,
+ base::SharedMemoryHandle buffer_handle,
+ uint32 buffer_size,
+ bool force_keyframe);
+ void OnUseOutputBitstreamBuffer(int32 buffer_id,
+ base::SharedMemoryHandle buffer_handle,
+ uint32 buffer_size);
+ void OnRequestEncodingParametersChange(uint32 bitrate, uint32 framerate);
+
+ void EncodeFrameFinished(int32 frame_id, scoped_ptr<base::SharedMemory> shm);
+
+ void Send(IPC::Message* message);
+
+ // Weak pointer for media::VideoFrames that refer back to |this|.
+ base::WeakPtrFactory<GpuVideoEncodeAccelerator> weak_this_factory_;
+
+ // The GpuChannel owns this GpuVideoEncodeAccelerator and will outlive |this|.
+ GpuChannel* channel_;
+ const int32 route_id_;
+
+ // Owned pointer to the underlying VideoEncodeAccelerator.
+ scoped_ptr<media::VideoEncodeAccelerator> encoder_;
+
+ // Video encoding parameters.
+ media::VideoFrame::Format input_format_;
+ gfx::Size input_visible_size_;
+ gfx::Size input_coded_size_;
+ size_t output_buffer_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuVideoEncodeAccelerator);
+};
+
+} // namespace content
+
+#endif // CONTENT_COMMON_GPU_MEDIA_GPU_VIDEO_ENCODE_ACCELERATOR_H_
diff --git a/content/content_common.gypi b/content/content_common.gypi
index 0577548..0747f02 100644
--- a/content/content_common.gypi
+++ b/content/content_common.gypi
@@ -197,6 +197,8 @@
'common/gpu/client/gpu_channel_host.h',
'common/gpu/client/gpu_video_decode_accelerator_host.cc',
'common/gpu/client/gpu_video_decode_accelerator_host.h',
+ 'common/gpu/client/gpu_video_encode_accelerator_host.cc',
+ 'common/gpu/client/gpu_video_encode_accelerator_host.h',
'common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc',
'common/gpu/client/webgraphicscontext3d_command_buffer_impl.h',
'common/gpu/gpu_channel.cc',
@@ -235,6 +237,8 @@
'common/gpu/media/h264_parser.h',
'common/gpu/media/gpu_video_decode_accelerator.cc',
'common/gpu/media/gpu_video_decode_accelerator.h',
+ 'common/gpu/media/gpu_video_encode_accelerator.cc',
+ 'common/gpu/media/gpu_video_encode_accelerator.h',
'common/gpu/sync_point_manager.h',
'common/gpu/sync_point_manager.cc',
'common/gpu/texture_image_transport_surface.h',
diff --git a/content/content_renderer.gypi b/content/content_renderer.gypi
index fc539f1..b44b1f4 100644
--- a/content/content_renderer.gypi
+++ b/content/content_renderer.gypi
@@ -215,8 +215,8 @@
'renderer/media/preload.h',
'renderer/media/render_media_log.cc',
'renderer/media/render_media_log.h',
- 'renderer/media/renderer_gpu_video_decoder_factories.cc',
- 'renderer/media/renderer_gpu_video_decoder_factories.h',
+ 'renderer/media/renderer_gpu_video_accelerator_factories.cc',
+ 'renderer/media/renderer_gpu_video_accelerator_factories.h',
'renderer/media/renderer_webaudiodevice_impl.cc',
'renderer/media/renderer_webaudiodevice_impl.h',
'renderer/media/renderer_webmidiaccessor_impl.cc',
@@ -612,6 +612,10 @@
'renderer/media/rtc_video_decoder.h',
'renderer/media/rtc_video_decoder_factory.cc',
'renderer/media/rtc_video_decoder_factory.h',
+ 'renderer/media/rtc_video_encoder.cc',
+ 'renderer/media/rtc_video_encoder.h',
+ 'renderer/media/rtc_video_encoder_factory.cc',
+ 'renderer/media/rtc_video_encoder_factory.h',
'renderer/media/video_destination_handler.cc',
'renderer/media/video_destination_handler.h',
'renderer/media/video_source_handler.cc',
diff --git a/content/public/common/content_switches.cc b/content/public/common/content_switches.cc
index 5169ba4..65d1b41 100644
--- a/content/public/common/content_switches.cc
+++ b/content/public/common/content_switches.cc
@@ -873,6 +873,9 @@ const char kEnableWebRtcAecRecordings[] = "enable-webrtc-aec-recordings";
// Enables HW decode acceleration for WebRTC.
const char kEnableWebRtcHWDecoding[] = "enable-webrtc-hw-decoding";
+// Enables HW encode acceleration for WebRTC.
+const char kEnableWebRtcHWEncoding[] = "enable-webrtc-hw-encoding";
+
#endif
#if defined(OS_ANDROID)
diff --git a/content/public/common/content_switches.h b/content/public/common/content_switches.h
index 949400a..bf54a95 100644
--- a/content/public/common/content_switches.h
+++ b/content/public/common/content_switches.h
@@ -248,6 +248,7 @@ CONTENT_EXPORT extern const char kDisableDeviceEnumeration[];
CONTENT_EXPORT extern const char kEnableSCTPDataChannels[];
extern const char kEnableWebRtcAecRecordings[];
extern const char kEnableWebRtcHWDecoding[];
+extern const char kEnableWebRtcHWEncoding[];
#endif
#if defined(OS_ANDROID)
diff --git a/content/renderer/media/media_stream_dependency_factory.cc b/content/renderer/media/media_stream_dependency_factory.cc
index 2d19ae0..b5adf1e 100644
--- a/content/renderer/media/media_stream_dependency_factory.cc
+++ b/content/renderer/media/media_stream_dependency_factory.cc
@@ -16,6 +16,7 @@
#include "content/renderer/media/rtc_peer_connection_handler.h"
#include "content/renderer/media/rtc_video_capturer.h"
#include "content/renderer/media/rtc_video_decoder_factory.h"
+#include "content/renderer/media/rtc_video_encoder_factory.h"
#include "content/renderer/media/video_capture_impl_manager.h"
#include "content/renderer/media/webaudio_capturer_source.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
@@ -27,7 +28,7 @@
#include "content/renderer/p2p/port_allocator.h"
#include "content/renderer/render_thread_impl.h"
#include "jingle/glue/thread_wrapper.h"
-#include "media/filters/gpu_video_decoder_factories.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/WebKit/public/platform/WebMediaStream.h"
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
@@ -488,27 +489,32 @@ bool MediaStreamDependencyFactory::CreatePeerConnectionFactory() {
audio_device_ = new WebRtcAudioDeviceImpl();
scoped_ptr<cricket::WebRtcVideoDecoderFactory> decoder_factory;
+ scoped_ptr<cricket::WebRtcVideoEncoderFactory> encoder_factory;
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kEnableWebRtcHWDecoding)) {
- scoped_refptr<base::MessageLoopProxy> media_loop_proxy =
- RenderThreadImpl::current()->GetMediaThreadMessageLoopProxy();
- scoped_refptr<RendererGpuVideoDecoderFactories> gpu_factories =
- RenderThreadImpl::current()->GetGpuFactories(media_loop_proxy);
- if (gpu_factories.get() != NULL)
+ scoped_refptr<base::MessageLoopProxy> media_loop_proxy =
+ RenderThreadImpl::current()->GetMediaThreadMessageLoopProxy();
+ scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories =
+ RenderThreadImpl::current()->GetGpuFactories(media_loop_proxy);
+#if !defined(GOOGLE_TV)
+ if (cmd_line->HasSwitch(switches::kEnableWebRtcHWDecoding))
+ if (gpu_factories)
decoder_factory.reset(new RTCVideoDecoderFactory(gpu_factories));
- }
-#if defined(GOOGLE_TV)
+#else
// PeerConnectionFactory will hold the ownership of this
// VideoDecoderFactory.
- decoder_factory.reset(decoder_factory_tv_ = new RTCVideoDecoderFactoryTv);
+ decoder_factory.reset(decoder_factory_tv_ = new RTCVideoDecoderFactoryTv());
#endif
+ if (cmd_line->HasSwitch(switches::kEnableWebRtcHWEncoding))
+ if (gpu_factories)
+ encoder_factory.reset(new RTCVideoEncoderFactory(gpu_factories));
+
scoped_refptr<webrtc::PeerConnectionFactoryInterface> factory(
webrtc::CreatePeerConnectionFactory(worker_thread_,
signaling_thread_,
audio_device_.get(),
- NULL,
+ encoder_factory.release(),
decoder_factory.release()));
if (factory.get())
pc_factory_ = factory;
diff --git a/content/renderer/media/renderer_gpu_video_decoder_factories.cc b/content/renderer/media/renderer_gpu_video_accelerator_factories.cc
index d33e590..f586497 100644
--- a/content/renderer/media/renderer_gpu_video_decoder_factories.cc
+++ b/content/renderer/media/renderer_gpu_video_accelerator_factories.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "content/renderer/media/renderer_gpu_video_decoder_factories.h"
+#include "content/renderer/media/renderer_gpu_video_accelerator_factories.h"
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
@@ -17,8 +17,8 @@
namespace content {
-RendererGpuVideoDecoderFactories::~RendererGpuVideoDecoderFactories() {}
-RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories(
+RendererGpuVideoAcceleratorFactories::~RendererGpuVideoAcceleratorFactories() {}
+RendererGpuVideoAcceleratorFactories::RendererGpuVideoAcceleratorFactories(
GpuChannelHost* gpu_channel_host,
const scoped_refptr<base::MessageLoopProxy>& message_loop,
WebGraphicsContext3DCommandBufferImpl* context)
@@ -28,32 +28,37 @@ RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories(
aborted_waiter_(true, false),
message_loop_async_waiter_(false, false),
render_thread_async_waiter_(false, false) {
+ // |context| is only required to support HW-accelerated decode.
+ if (!context)
+ return;
+
if (message_loop_->BelongsToCurrentThread()) {
AsyncGetContext(context);
message_loop_async_waiter_.Reset();
return;
}
// Wait for the context to be acquired.
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &RendererGpuVideoDecoderFactories::AsyncGetContext,
- // Unretained to avoid ref/deref'ing |*this|, which is not yet stored in a
- // scoped_refptr. Safe because the Wait() below keeps us alive until this
- // task completes.
- base::Unretained(this),
- // OK to pass raw because the pointee is only deleted on the compositor
- // thread, and only as the result of a PostTask from the render thread
- // which can only happen after this function returns, so our PostTask will
- // run first.
- context));
+ message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncGetContext,
+ // Unretained to avoid ref/deref'ing |*this|, which is not yet
+ // stored in a scoped_refptr. Safe because the Wait() below
+ // keeps us alive until this task completes.
+ base::Unretained(this),
+ // OK to pass raw because the pointee is only deleted on the
+ // compositor thread, and only as the result of a PostTask from
+ // the render thread which can only happen after this function
+ // returns, so our PostTask will run first.
+ context));
message_loop_async_waiter_.Wait();
}
-RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories()
+RendererGpuVideoAcceleratorFactories::RendererGpuVideoAcceleratorFactories()
: aborted_waiter_(true, false),
message_loop_async_waiter_(false, false),
render_thread_async_waiter_(false, false) {}
-void RendererGpuVideoDecoderFactories::AsyncGetContext(
+void RendererGpuVideoAcceleratorFactories::AsyncGetContext(
WebGraphicsContext3DCommandBufferImpl* context) {
context_ = context->AsWeakPtr();
if (context_.get()) {
@@ -66,20 +71,23 @@ void RendererGpuVideoDecoderFactories::AsyncGetContext(
message_loop_async_waiter_.Signal();
}
-media::VideoDecodeAccelerator*
-RendererGpuVideoDecoderFactories::CreateVideoDecodeAccelerator(
+scoped_ptr<media::VideoDecodeAccelerator>
+RendererGpuVideoAcceleratorFactories::CreateVideoDecodeAccelerator(
media::VideoCodecProfile profile,
media::VideoDecodeAccelerator::Client* client) {
if (message_loop_->BelongsToCurrentThread()) {
AsyncCreateVideoDecodeAccelerator(profile, client);
message_loop_async_waiter_.Reset();
- return vda_.release();
+ return vda_.Pass();
}
// The VDA is returned in the vda_ member variable by the
// AsyncCreateVideoDecodeAccelerator() function.
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator,
- this, profile, client));
+ message_loop_->PostTask(FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::
+ AsyncCreateVideoDecodeAccelerator,
+ this,
+ profile,
+ client));
base::WaitableEvent* objects[] = {&aborted_waiter_,
&message_loop_async_waiter_};
@@ -87,17 +95,49 @@ RendererGpuVideoDecoderFactories::CreateVideoDecodeAccelerator(
// If we are aborting and the VDA is created by the
// AsyncCreateVideoDecodeAccelerator() function later we need to ensure
// that it is destroyed on the same thread.
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator,
- this));
- return NULL;
+ message_loop_->PostTask(FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::
+ AsyncDestroyVideoDecodeAccelerator,
+ this));
+ return scoped_ptr<media::VideoDecodeAccelerator>();
}
- return vda_.release();
+ return vda_.Pass();
}
-void RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator(
- media::VideoCodecProfile profile,
- media::VideoDecodeAccelerator::Client* client) {
+scoped_ptr<media::VideoEncodeAccelerator>
+RendererGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator(
+ media::VideoEncodeAccelerator::Client* client) {
+ if (message_loop_->BelongsToCurrentThread()) {
+ AsyncCreateVideoEncodeAccelerator(client);
+ message_loop_async_waiter_.Reset();
+ return vea_.Pass();
+ }
+ // The VEA is returned in the vea_ member variable by the
+ // AsyncCreateVideoEncodeAccelerator() function.
+ message_loop_->PostTask(FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::
+ AsyncCreateVideoEncodeAccelerator,
+ this,
+ client));
+
+ base::WaitableEvent* objects[] = {&aborted_waiter_,
+ &message_loop_async_waiter_};
+ if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) {
+    // If we are aborting and the VEA is created by the
+ // AsyncCreateVideoEncodeAccelerator() function later we need to ensure
+ // that it is destroyed on the same thread.
+ message_loop_->PostTask(FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::
+ AsyncDestroyVideoEncodeAccelerator,
+ this));
+ return scoped_ptr<media::VideoEncodeAccelerator>();
+ }
+ return vea_.Pass();
+}
+
+void RendererGpuVideoAcceleratorFactories::AsyncCreateVideoDecodeAccelerator(
+ media::VideoCodecProfile profile,
+ media::VideoDecodeAccelerator::Client* client) {
DCHECK(message_loop_->BelongsToCurrentThread());
if (context_.get() && context_->GetCommandBufferProxy()) {
@@ -107,8 +147,17 @@ void RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator(
message_loop_async_waiter_.Signal();
}
-uint32 RendererGpuVideoDecoderFactories::CreateTextures(
- int32 count, const gfx::Size& size,
+void RendererGpuVideoAcceleratorFactories::AsyncCreateVideoEncodeAccelerator(
+ media::VideoEncodeAccelerator::Client* client) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ vea_ = gpu_channel_host_->CreateVideoEncoder(client).Pass();
+ message_loop_async_waiter_.Signal();
+}
+
+uint32 RendererGpuVideoAcceleratorFactories::CreateTextures(
+ int32 count,
+ const gfx::Size& size,
std::vector<uint32>* texture_ids,
std::vector<gpu::Mailbox>* texture_mailboxes,
uint32 texture_target) {
@@ -121,9 +170,14 @@ uint32 RendererGpuVideoDecoderFactories::CreateTextures(
message_loop_async_waiter_.Reset();
return sync_point;
}
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &RendererGpuVideoDecoderFactories::AsyncCreateTextures, this,
- count, size, texture_target, &sync_point));
+ message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncCreateTextures,
+ this,
+ count,
+ size,
+ texture_target,
+ &sync_point));
base::WaitableEvent* objects[] = {&aborted_waiter_,
&message_loop_async_waiter_};
@@ -134,8 +188,10 @@ uint32 RendererGpuVideoDecoderFactories::CreateTextures(
return sync_point;
}
-void RendererGpuVideoDecoderFactories::AsyncCreateTextures(
- int32 count, const gfx::Size& size, uint32 texture_target,
+void RendererGpuVideoAcceleratorFactories::AsyncCreateTextures(
+ int32 count,
+ const gfx::Size& size,
+ uint32 texture_target,
uint32* sync_point) {
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(texture_target);
@@ -157,8 +213,15 @@ void RendererGpuVideoDecoderFactories::AsyncCreateTextures(
gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
if (texture_target == GL_TEXTURE_2D) {
- gles2->TexImage2D(texture_target, 0, GL_RGBA, size.width(), size.height(),
- 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
+ gles2->TexImage2D(texture_target,
+ 0,
+ GL_RGBA,
+ size.width(),
+ size.height(),
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ NULL);
}
gles2->GenMailboxCHROMIUM(created_texture_mailboxes_[i].name);
gles2->ProduceTextureCHROMIUM(texture_target,
@@ -175,16 +238,20 @@ void RendererGpuVideoDecoderFactories::AsyncCreateTextures(
message_loop_async_waiter_.Signal();
}
-void RendererGpuVideoDecoderFactories::DeleteTexture(uint32 texture_id) {
+void RendererGpuVideoAcceleratorFactories::DeleteTexture(uint32 texture_id) {
if (message_loop_->BelongsToCurrentThread()) {
AsyncDeleteTexture(texture_id);
return;
}
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &RendererGpuVideoDecoderFactories::AsyncDeleteTexture, this, texture_id));
+ message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncDeleteTexture,
+ this,
+ texture_id));
}
-void RendererGpuVideoDecoderFactories::AsyncDeleteTexture(uint32 texture_id) {
+void RendererGpuVideoAcceleratorFactories::AsyncDeleteTexture(
+ uint32 texture_id) {
DCHECK(message_loop_->BelongsToCurrentThread());
if (!context_.get())
return;
@@ -194,23 +261,25 @@ void RendererGpuVideoDecoderFactories::AsyncDeleteTexture(uint32 texture_id) {
DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
}
-void RendererGpuVideoDecoderFactories::WaitSyncPoint(uint32 sync_point) {
+void RendererGpuVideoAcceleratorFactories::WaitSyncPoint(uint32 sync_point) {
if (message_loop_->BelongsToCurrentThread()) {
AsyncWaitSyncPoint(sync_point);
message_loop_async_waiter_.Reset();
return;
}
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint,
- this,
- sync_point));
+ message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncWaitSyncPoint,
+ this,
+ sync_point));
base::WaitableEvent* objects[] = {&aborted_waiter_,
&message_loop_async_waiter_};
base::WaitableEvent::WaitMany(objects, arraysize(objects));
}
-void RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint(uint32 sync_point) {
+void RendererGpuVideoAcceleratorFactories::AsyncWaitSyncPoint(
+ uint32 sync_point) {
DCHECK(message_loop_->BelongsToCurrentThread());
if (!context_) {
message_loop_async_waiter_.Signal();
@@ -222,9 +291,10 @@ void RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint(uint32 sync_point) {
message_loop_async_waiter_.Signal();
}
-void RendererGpuVideoDecoderFactories::ReadPixels(
- uint32 texture_id, uint32 texture_target, const gfx::Size& size,
- const SkBitmap& pixels) {
+void RendererGpuVideoAcceleratorFactories::ReadPixels(uint32 texture_id,
+ uint32 texture_target,
+ const gfx::Size& size,
+ const SkBitmap& pixels) {
// SkBitmaps use the SkPixelRef object to refcount the underlying pixels.
// Multiple SkBitmaps can share a SkPixelRef instance. We use this to
// ensure that the underlying pixels in the SkBitmap passed in remain valid
@@ -232,9 +302,13 @@ void RendererGpuVideoDecoderFactories::ReadPixels(
read_pixels_bitmap_.setPixelRef(pixels.pixelRef());
if (!message_loop_->BelongsToCurrentThread()) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &RendererGpuVideoDecoderFactories::AsyncReadPixels, this,
- texture_id, texture_target, size));
+ message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncReadPixels,
+ this,
+ texture_id,
+ texture_target,
+ size));
base::WaitableEvent* objects[] = {&aborted_waiter_,
&message_loop_async_waiter_};
if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
@@ -246,8 +320,10 @@ void RendererGpuVideoDecoderFactories::ReadPixels(
read_pixels_bitmap_.setPixelRef(NULL);
}
-void RendererGpuVideoDecoderFactories::AsyncReadPixels(
- uint32 texture_id, uint32 texture_target, const gfx::Size& size) {
+void RendererGpuVideoAcceleratorFactories::AsyncReadPixels(
+ uint32 texture_id,
+ uint32 texture_target,
+ const gfx::Size& size) {
DCHECK(message_loop_->BelongsToCurrentThread());
if (!context_.get()) {
message_loop_async_waiter_.Signal();
@@ -269,25 +345,32 @@ void RendererGpuVideoDecoderFactories::AsyncReadPixels(
GLuint fb;
gles2->GenFramebuffers(1, &fb);
gles2->BindFramebuffer(GL_FRAMEBUFFER, fb);
- gles2->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- texture_target, tmp_texture, 0);
+ gles2->FramebufferTexture2D(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture_target, tmp_texture, 0);
gles2->PixelStorei(GL_PACK_ALIGNMENT, 4);
- gles2->ReadPixels(0, 0, size.width(), size.height(), GL_BGRA_EXT,
- GL_UNSIGNED_BYTE, read_pixels_bitmap_.pixelRef()->pixels());
+ gles2->ReadPixels(0,
+ 0,
+ size.width(),
+ size.height(),
+ GL_BGRA_EXT,
+ GL_UNSIGNED_BYTE,
+ read_pixels_bitmap_.pixelRef()->pixels());
gles2->DeleteFramebuffers(1, &fb);
gles2->DeleteTextures(1, &tmp_texture);
DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
message_loop_async_waiter_.Signal();
}
-base::SharedMemory* RendererGpuVideoDecoderFactories::CreateSharedMemory(
+base::SharedMemory* RendererGpuVideoAcceleratorFactories::CreateSharedMemory(
size_t size) {
if (main_message_loop_->BelongsToCurrentThread()) {
return ChildThread::current()->AllocateSharedMemory(size);
}
- main_message_loop_->PostTask(FROM_HERE, base::Bind(
- &RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory, this,
- size));
+ main_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncCreateSharedMemory,
+ this,
+ size));
base::WaitableEvent* objects[] = {&aborted_waiter_,
&render_thread_async_waiter_};
@@ -296,7 +379,8 @@ base::SharedMemory* RendererGpuVideoDecoderFactories::CreateSharedMemory(
return shared_memory_segment_.release();
}
-void RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory(size_t size) {
+void RendererGpuVideoAcceleratorFactories::AsyncCreateSharedMemory(
+ size_t size) {
DCHECK_EQ(base::MessageLoop::current(),
ChildThread::current()->message_loop());
@@ -306,22 +390,20 @@ void RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory(size_t size) {
}
scoped_refptr<base::MessageLoopProxy>
-RendererGpuVideoDecoderFactories::GetMessageLoop() {
+RendererGpuVideoAcceleratorFactories::GetMessageLoop() {
return message_loop_;
}
-void RendererGpuVideoDecoderFactories::Abort() {
- aborted_waiter_.Signal();
-}
+void RendererGpuVideoAcceleratorFactories::Abort() { aborted_waiter_.Signal(); }
-bool RendererGpuVideoDecoderFactories::IsAborted() {
+bool RendererGpuVideoAcceleratorFactories::IsAborted() {
return aborted_waiter_.IsSignaled();
}
-scoped_refptr<media::GpuVideoDecoderFactories>
-RendererGpuVideoDecoderFactories::Clone() {
- scoped_refptr<RendererGpuVideoDecoderFactories> factories =
- new RendererGpuVideoDecoderFactories();
+scoped_refptr<RendererGpuVideoAcceleratorFactories>
+RendererGpuVideoAcceleratorFactories::Clone() {
+ scoped_refptr<RendererGpuVideoAcceleratorFactories> factories =
+ new RendererGpuVideoAcceleratorFactories();
factories->message_loop_ = message_loop_;
factories->main_message_loop_ = main_message_loop_;
factories->gpu_channel_host_ = gpu_channel_host_;
@@ -329,10 +411,18 @@ RendererGpuVideoDecoderFactories::Clone() {
return factories;
}
-void RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator() {
+void
+RendererGpuVideoAcceleratorFactories::AsyncDestroyVideoDecodeAccelerator() {
// OK to release because Destroy() will delete the VDA instance.
if (vda_)
vda_.release()->Destroy();
}
+void
+RendererGpuVideoAcceleratorFactories::AsyncDestroyVideoEncodeAccelerator() {
+  // OK to release because Destroy() will delete the VEA instance.
+ if (vea_)
+ vea_.release()->Destroy();
+}
+
} // namespace content
diff --git a/content/renderer/media/renderer_gpu_video_decoder_factories.h b/content/renderer/media/renderer_gpu_video_accelerator_factories.h
index 32f9bcd..fcc4ffb 100644
--- a/content/renderer/media/renderer_gpu_video_decoder_factories.h
+++ b/content/renderer/media/renderer_gpu_video_accelerator_factories.h
@@ -1,9 +1,9 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef CONTENT_RENDERER_MEDIA_RENDERER_GPU_VIDEO_DECODER_FACTORIES_H_
-#define CONTENT_RENDERER_MEDIA_RENDERER_GPU_VIDEO_DECODER_FACTORIES_H_
+#ifndef CONTENT_RENDERER_MEDIA_RENDERER_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
+#define CONTENT_RENDERER_MEDIA_RENDERER_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
#include <vector>
@@ -12,7 +12,7 @@
#include "base/memory/weak_ptr.h"
#include "base/synchronization/waitable_event.h"
#include "content/common/content_export.h"
-#include "media/filters/gpu_video_decoder_factories.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "ui/gfx/size.h"
@@ -25,7 +25,7 @@ namespace content {
class GpuChannelHost;
class WebGraphicsContext3DCommandBufferImpl;
-// Glue code to expose functionality needed by media::GpuVideoDecoder to
+// Glue code to expose functionality needed by media::GpuVideoAccelerator to
// RenderViewImpl. This class is entirely an implementation detail of
// RenderViewImpl and only has its own header to allow extraction of its
// implementation from render_view_impl.cc which is already far too large.
@@ -34,27 +34,31 @@ class WebGraphicsContext3DCommandBufferImpl;
// internally trampolined to the appropriate thread. GPU/GL-related calls go to
// the constructor-argument loop (the media thread), and shmem-related calls go
// to the render thread.
-class CONTENT_EXPORT RendererGpuVideoDecoderFactories
- : public media::GpuVideoDecoderFactories {
+class CONTENT_EXPORT RendererGpuVideoAcceleratorFactories
+ : public media::GpuVideoAcceleratorFactories {
public:
// Takes a ref on |gpu_channel_host| and tests |context| for loss before each
// use.
- RendererGpuVideoDecoderFactories(
+ RendererGpuVideoAcceleratorFactories(
GpuChannelHost* gpu_channel_host,
const scoped_refptr<base::MessageLoopProxy>& message_loop,
WebGraphicsContext3DCommandBufferImpl* wgc3dcbi);
- // media::GpuVideoDecoderFactories implementation.
- virtual media::VideoDecodeAccelerator* CreateVideoDecodeAccelerator(
- media::VideoCodecProfile profile,
- media::VideoDecodeAccelerator::Client* client) OVERRIDE;
+ // media::GpuVideoAcceleratorFactories implementation.
+ virtual scoped_ptr<media::VideoDecodeAccelerator>
+ CreateVideoDecodeAccelerator(
+ media::VideoCodecProfile profile,
+ media::VideoDecodeAccelerator::Client* client) OVERRIDE;
+ virtual scoped_ptr<media::VideoEncodeAccelerator>
+ CreateVideoEncodeAccelerator(
+ media::VideoEncodeAccelerator::Client* client) OVERRIDE;
// Creates textures and produces them into mailboxes. Returns a sync point to
// wait on before using the mailboxes, or 0 on failure.
- virtual uint32 CreateTextures(
- int32 count, const gfx::Size& size,
- std::vector<uint32>* texture_ids,
- std::vector<gpu::Mailbox>* texture_mailboxes,
- uint32 texture_target) OVERRIDE;
+ virtual uint32 CreateTextures(int32 count,
+ const gfx::Size& size,
+ std::vector<uint32>* texture_ids,
+ std::vector<gpu::Mailbox>* texture_mailboxes,
+ uint32 texture_target) OVERRIDE;
virtual void DeleteTexture(uint32 texture_id) OVERRIDE;
virtual void WaitSyncPoint(uint32 sync_point) OVERRIDE;
virtual void ReadPixels(uint32 texture_id,
@@ -65,16 +69,14 @@ class CONTENT_EXPORT RendererGpuVideoDecoderFactories
virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
virtual void Abort() OVERRIDE;
virtual bool IsAborted() OVERRIDE;
-
- // Makes a copy of |this|.
- scoped_refptr<media::GpuVideoDecoderFactories> Clone();
+ scoped_refptr<RendererGpuVideoAcceleratorFactories> Clone();
protected:
- friend class base::RefCountedThreadSafe<RendererGpuVideoDecoderFactories>;
- virtual ~RendererGpuVideoDecoderFactories();
+ friend class base::RefCountedThreadSafe<RendererGpuVideoAcceleratorFactories>;
+ virtual ~RendererGpuVideoAcceleratorFactories();
private:
- RendererGpuVideoDecoderFactories();
+ RendererGpuVideoAcceleratorFactories();
// Helper for the constructor to acquire the ContentGLContext on
// |message_loop_|.
@@ -85,19 +87,25 @@ class CONTENT_EXPORT RendererGpuVideoDecoderFactories
// (except for DeleteTexture, which is fire-and-forget).
// AsyncCreateSharedMemory runs on the renderer thread and the rest run on
// |message_loop_|.
- // The AsyncCreateVideoDecodeAccelerator returns its output in the vda_
- // member.
+ // AsyncCreateVideoDecodeAccelerator returns its output in the |vda_| member.
+ // AsyncCreateVideoEncodeAccelerator returns its output in the |vea_| member.
void AsyncCreateVideoDecodeAccelerator(
media::VideoCodecProfile profile,
media::VideoDecodeAccelerator::Client* client);
- void AsyncCreateTextures(int32 count, const gfx::Size& size,
- uint32 texture_target, uint32* sync_point);
+ void AsyncCreateVideoEncodeAccelerator(
+ media::VideoEncodeAccelerator::Client* client);
+ void AsyncCreateTextures(int32 count,
+ const gfx::Size& size,
+ uint32 texture_target,
+ uint32* sync_point);
void AsyncDeleteTexture(uint32 texture_id);
void AsyncWaitSyncPoint(uint32 sync_point);
- void AsyncReadPixels(uint32 texture_id, uint32 texture_target,
+ void AsyncReadPixels(uint32 texture_id,
+ uint32 texture_target,
const gfx::Size& size);
void AsyncCreateSharedMemory(size_t size);
void AsyncDestroyVideoDecodeAccelerator();
+ void AsyncDestroyVideoEncodeAccelerator();
scoped_refptr<base::MessageLoopProxy> message_loop_;
scoped_refptr<base::MessageLoopProxy> main_message_loop_;
@@ -116,9 +124,12 @@ class CONTENT_EXPORT RendererGpuVideoDecoderFactories
// message loop to indicate their completion. e.g. AsyncCreateSharedMemory.
base::WaitableEvent render_thread_async_waiter_;
- // The vda returned by the CreateVideoAcclelerator function.
+ // The vda returned by the CreateVideoDecodeAccelerator function.
scoped_ptr<media::VideoDecodeAccelerator> vda_;
+ // The vea returned by the CreateVideoEncodeAccelerator function.
+ scoped_ptr<media::VideoEncodeAccelerator> vea_;
+
// Shared memory segment which is returned by the CreateSharedMemory()
// function.
scoped_ptr<base::SharedMemory> shared_memory_segment_;
@@ -130,9 +141,9 @@ class CONTENT_EXPORT RendererGpuVideoDecoderFactories
std::vector<uint32> created_textures_;
std::vector<gpu::Mailbox> created_texture_mailboxes_;
- DISALLOW_COPY_AND_ASSIGN(RendererGpuVideoDecoderFactories);
+ DISALLOW_COPY_AND_ASSIGN(RendererGpuVideoAcceleratorFactories);
};
} // namespace content
-#endif // CONTENT_RENDERER_MEDIA_RENDERER_GPU_VIDEO_DECODER_FACTORIES_H_
+#endif // CONTENT_RENDERER_MEDIA_RENDERER_GPU_VIDEO_ACCELERATOR_FACTORIES_H_
diff --git a/content/renderer/media/rtc_video_decoder.cc b/content/renderer/media/rtc_video_decoder.cc
index 12904f1..27030f6 100644
--- a/content/renderer/media/rtc_video_decoder.cc
+++ b/content/renderer/media/rtc_video_decoder.cc
@@ -13,7 +13,7 @@
#include "base/task_runner_util.h"
#include "content/child/child_thread.h"
#include "media/base/bind_to_loop.h"
-#include "media/filters/gpu_video_decoder_factories.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
#include "third_party/webrtc/system_wrappers/interface/ref_count.h"
namespace content {
@@ -69,7 +69,7 @@ RTCVideoDecoder::BufferData::BufferData() {}
RTCVideoDecoder::BufferData::~BufferData() {}
RTCVideoDecoder::RTCVideoDecoder(
- const scoped_refptr<media::GpuVideoDecoderFactories>& factories)
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories)
: weak_factory_(this),
weak_this_(weak_factory_.GetWeakPtr()),
factories_(factories),
@@ -122,7 +122,7 @@ RTCVideoDecoder::~RTCVideoDecoder() {
scoped_ptr<RTCVideoDecoder> RTCVideoDecoder::Create(
webrtc::VideoCodecType type,
- const scoped_refptr<media::GpuVideoDecoderFactories>& factories) {
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories) {
scoped_ptr<RTCVideoDecoder> decoder;
// Convert WebRTC codec type to media codec profile.
media::VideoCodecProfile profile;
@@ -136,8 +136,8 @@ scoped_ptr<RTCVideoDecoder> RTCVideoDecoder::Create(
}
decoder.reset(new RTCVideoDecoder(factories));
- decoder->vda_
- .reset(factories->CreateVideoDecodeAccelerator(profile, decoder.get()));
+ decoder->vda_ =
+ factories->CreateVideoDecodeAccelerator(profile, decoder.get()).Pass();
// vda can be NULL if VP8 is not supported.
if (decoder->vda_ != NULL) {
decoder->state_ = INITIALIZED;
@@ -397,7 +397,7 @@ scoped_refptr<media::VideoFrame> RTCVideoDecoder::CreateVideoFrame(
visible_rect,
natural_size,
timestamp_ms,
- base::Bind(&media::GpuVideoDecoderFactories::ReadPixels,
+ base::Bind(&media::GpuVideoAcceleratorFactories::ReadPixels,
factories_,
pb.texture_id(),
decoder_texture_target_,
diff --git a/content/renderer/media/rtc_video_decoder.h b/content/renderer/media/rtc_video_decoder.h
index 11e5852..7a2686e 100644
--- a/content/renderer/media/rtc_video_decoder.h
+++ b/content/renderer/media/rtc_video_decoder.h
@@ -30,7 +30,7 @@ class MessageLoopProxy;
namespace media {
class DecoderBuffer;
-class GpuVideoDecoderFactories;
+class GpuVideoAcceleratorFactories;
}
namespace content {
@@ -52,7 +52,7 @@ class CONTENT_EXPORT RTCVideoDecoder
// run on the message loop of |factories|.
static scoped_ptr<RTCVideoDecoder> Create(
webrtc::VideoCodecType type,
- const scoped_refptr<media::GpuVideoDecoderFactories>& factories);
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories);
// webrtc::VideoDecoder implementation.
// Called on WebRTC DecodingThread.
@@ -113,7 +113,7 @@ class CONTENT_EXPORT RTCVideoDecoder
// The meessage loop of |factories| will be saved to |vda_loop_proxy_|.
RTCVideoDecoder(
- const scoped_refptr<media::GpuVideoDecoderFactories>& factories);
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& factories);
void Initialize(base::WaitableEvent* waiter);
@@ -197,7 +197,7 @@ class CONTENT_EXPORT RTCVideoDecoder
base::WeakPtrFactory<RTCVideoDecoder> weak_factory_;
base::WeakPtr<RTCVideoDecoder> weak_this_;
- scoped_refptr<media::GpuVideoDecoderFactories> factories_;
+ scoped_refptr<media::GpuVideoAcceleratorFactories> factories_;
// The message loop to run callbacks on. This is from |factories_|.
scoped_refptr<base::MessageLoopProxy> vda_loop_proxy_;
diff --git a/content/renderer/media/rtc_video_decoder_factory.cc b/content/renderer/media/rtc_video_decoder_factory.cc
index e621735..57b6a58 100644
--- a/content/renderer/media/rtc_video_decoder_factory.cc
+++ b/content/renderer/media/rtc_video_decoder_factory.cc
@@ -6,14 +6,13 @@
#include "base/location.h"
#include "base/memory/scoped_ptr.h"
-#include "content/renderer/media/renderer_gpu_video_decoder_factories.h"
+#include "content/renderer/media/renderer_gpu_video_accelerator_factories.h"
#include "content/renderer/media/rtc_video_decoder.h"
-#include "media/filters/gpu_video_decoder_factories.h"
namespace content {
RTCVideoDecoderFactory::RTCVideoDecoderFactory(
- const scoped_refptr<RendererGpuVideoDecoderFactories>& gpu_factories)
+ const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories)
: gpu_factories_(gpu_factories) {
DVLOG(2) << "RTCVideoDecoderFactory";
}
@@ -25,7 +24,7 @@ RTCVideoDecoderFactory::~RTCVideoDecoderFactory() {
webrtc::VideoDecoder* RTCVideoDecoderFactory::CreateVideoDecoder(
webrtc::VideoCodecType type) {
DVLOG(2) << "CreateVideoDecoder";
- // RendererGpuVideoDecoderFactories is not thread safe. It cannot be shared
+ // GpuVideoAcceleratorFactories is not thread safe. It cannot be shared
// by different decoders. This method runs on Chrome_libJingle_WorkerThread
// and the child thread is blocked while this runs. We cannot create new gpu
// factories here. Clone one instead.
diff --git a/content/renderer/media/rtc_video_decoder_factory.h b/content/renderer/media/rtc_video_decoder_factory.h
index 1455d7b..f7a42a3 100644
--- a/content/renderer/media/rtc_video_decoder_factory.h
+++ b/content/renderer/media/rtc_video_decoder_factory.h
@@ -11,23 +11,19 @@
#include "third_party/libjingle/source/talk/media/webrtc/webrtcvideodecoderfactory.h"
#include "third_party/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-namespace media {
-class GpuVideoDecoderFactories;
-}
-
namespace webrtc {
class VideoDecoder;
}
namespace content {
-class RendererGpuVideoDecoderFactories;
+class RendererGpuVideoAcceleratorFactories;
// TODO(wuchengli): add unittest.
class CONTENT_EXPORT RTCVideoDecoderFactory
: NON_EXPORTED_BASE(public cricket::WebRtcVideoDecoderFactory) {
public:
explicit RTCVideoDecoderFactory(
- const scoped_refptr<RendererGpuVideoDecoderFactories>& gpu_factories);
+ const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories);
virtual ~RTCVideoDecoderFactory();
// Runs on Chrome_libJingle_WorkerThread. The child thread is blocked while
@@ -40,7 +36,7 @@ class CONTENT_EXPORT RTCVideoDecoderFactory
virtual void DestroyVideoDecoder(webrtc::VideoDecoder* decoder) OVERRIDE;
private:
- scoped_refptr<RendererGpuVideoDecoderFactories> gpu_factories_;
+ scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories_;
DISALLOW_COPY_AND_ASSIGN(RTCVideoDecoderFactory);
};
diff --git a/content/renderer/media/rtc_video_decoder_unittest.cc b/content/renderer/media/rtc_video_decoder_unittest.cc
index 2ffeb3e..3355b6a 100644
--- a/content/renderer/media/rtc_video_decoder_unittest.cc
+++ b/content/renderer/media/rtc_video_decoder_unittest.cc
@@ -8,7 +8,7 @@
#include "base/threading/thread.h"
#include "content/renderer/media/rtc_video_decoder.h"
#include "media/base/gmock_callback_support.h"
-#include "media/filters/mock_gpu_video_decoder_factories.h"
+#include "media/filters/mock_gpu_video_accelerator_factories.h"
#include "media/video/mock_video_decode_accelerator.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -25,7 +25,7 @@ class RTCVideoDecoderTest : public ::testing::Test,
webrtc::DecodedImageCallback {
public:
RTCVideoDecoderTest()
- : mock_gpu_factories_(new media::MockGpuVideoDecoderFactories),
+ : mock_gpu_factories_(new media::MockGpuVideoAcceleratorFactories),
vda_thread_("vda_thread"),
idle_waiter_(false, false) {
memset(&codec_, 0, sizeof(codec_));
@@ -37,11 +37,11 @@ class RTCVideoDecoderTest : public ::testing::Test,
mock_vda_ = new media::MockVideoDecodeAccelerator;
EXPECT_CALL(*mock_gpu_factories_, GetMessageLoop())
.WillRepeatedly(Return(vda_loop_proxy_));
- EXPECT_CALL(*mock_gpu_factories_, CreateVideoDecodeAccelerator(_, _))
+ EXPECT_CALL(*mock_gpu_factories_, DoCreateVideoDecodeAccelerator(_, _))
.WillRepeatedly(
Return(static_cast<media::VideoDecodeAccelerator*>(NULL)));
EXPECT_CALL(*mock_gpu_factories_,
- CreateVideoDecodeAccelerator(media::VP8PROFILE_MAIN, _))
+ DoCreateVideoDecodeAccelerator(media::VP8PROFILE_MAIN, _))
.WillRepeatedly(Return(mock_vda_));
EXPECT_CALL(*mock_gpu_factories_, Abort()).WillRepeatedly(Return());
EXPECT_CALL(*mock_gpu_factories_, CreateSharedMemory(_))
@@ -94,7 +94,7 @@ class RTCVideoDecoderTest : public ::testing::Test,
}
protected:
- scoped_refptr<media::MockGpuVideoDecoderFactories> mock_gpu_factories_;
+ scoped_refptr<media::MockGpuVideoAcceleratorFactories> mock_gpu_factories_;
media::MockVideoDecodeAccelerator* mock_vda_;
scoped_ptr<RTCVideoDecoder> rtc_decoder_;
webrtc::VideoCodec codec_;
diff --git a/content/renderer/media/rtc_video_encoder.cc b/content/renderer/media/rtc_video_encoder.cc
new file mode 100644
index 0000000..416317d
--- /dev/null
+++ b/content/renderer/media/rtc_video_encoder.cc
@@ -0,0 +1,658 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/rtc_video_encoder.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/scoped_vector.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/synchronization/waitable_event.h"
+#include "content/renderer/media/renderer_gpu_video_accelerator_factories.h"
+#include "media/base/bitstream_buffer.h"
+#include "media/base/video_frame.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
+#include "media/video/video_encode_accelerator.h"
+
+#define NOTIFY_ERROR(x) \
+ do { \
+ DLOG(ERROR) << "calling NotifyError(): " << x; \
+ NotifyError(x); \
+ } while (0)
+
+namespace content {
+
+// This private class of RTCVideoEncoder does the actual work of communicating
+// with a media::VideoEncodeAccelerator for handling video encoding. It can
+// be created on any thread, but should subsequently be posted to (and Destroy()
+// called on) a single thread. Callbacks to RTCVideoEncoder are posted to the
+// thread on which the instance was constructed.
+//
+// This class separates state related to the thread that RTCVideoEncoder
+// operates on (presently the libjingle worker thread) from the thread that
+// |gpu_factories_| provides for accelerator operations (presently the media
+// thread). The RTCVideoEncoder class can be deleted directly by WebRTC, while
+// RTCVideoEncoder::Impl stays around long enough to properly shut down the VEA.
+class RTCVideoEncoder::Impl
+ : public media::VideoEncodeAccelerator::Client,
+ public base::RefCountedThreadSafe<RTCVideoEncoder::Impl> {
+ public:
+ Impl(
+ const base::WeakPtr<RTCVideoEncoder>& weak_encoder,
+ const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories);
+
+ // Create the VEA and call Initialize() on it. Called once per instantiation,
+ // and then the instance is bound forevermore to whichever thread made the
+ // call.
+ // RTCVideoEncoder expects to be able to call this function synchronously from
+ // its own thread, hence the |async_waiter| and |async_retval| arguments.
+ void CreateAndInitializeVEA(const gfx::Size& input_visible_size,
+ uint32 bitrate,
+ media::VideoCodecProfile profile,
+ base::WaitableEvent* async_waiter,
+ int32_t* async_retval);
+ // Enqueue a frame from WebRTC for encoding.
+ // RTCVideoEncoder expects to be able to call this function synchronously from
+ // its own thread, hence the |async_waiter| and |async_retval| arguments.
+ void Enqueue(const webrtc::I420VideoFrame* input_frame,
+ bool force_keyframe,
+ base::WaitableEvent* async_waiter,
+ int32_t* async_retval);
+
+ // RTCVideoEncoder is given a buffer to be passed to WebRTC through the
+ // RTCVideoEncoder::ReturnEncodedImage() function. When that is complete,
+ // the buffer is returned to Impl by its index using this function.
+ void UseOutputBitstreamBufferId(int32 bitstream_buffer_id);
+
+ // Request encoding parameter change for the underlying encoder.
+ void RequestEncodingParametersChange(uint32 bitrate, uint32 framerate);
+
+ // Destroy this Impl's encoder. The destructor is not explicitly called, as
+ // Impl is a base::RefCountedThreadSafe.
+ void Destroy();
+
+ // media::VideoEncodeAccelerator::Client implementation.
+ virtual void NotifyInitializeDone() OVERRIDE;
+ virtual void RequireBitstreamBuffers(unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) OVERRIDE;
+ virtual void BitstreamBufferReady(int32 bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame) OVERRIDE;
+ virtual void NotifyError(media::VideoEncodeAccelerator::Error error) OVERRIDE;
+
+ private:
+ friend class base::RefCountedThreadSafe<Impl>;
+
+ enum {
+ kInputBufferExtraCount = 1, // The number of input buffers allocated, more
+ // than what is requested by
+ // VEA::RequireBitstreamBuffers().
+ kOutputBufferCount = 3,
+ };
+
+ virtual ~Impl();
+
+ // Perform encoding on an input frame from the input queue.
+ void EncodeOneFrame();
+
+ // Notify that an input frame is finished for encoding. |index| is the index
+ // of the completed frame in |input_buffers_|.
+ void EncodeFrameFinished(int index);
+
+ // Set up/signal |async_waiter_| and |async_retval_|; see declarations below.
+ void RegisterAsyncWaiter(base::WaitableEvent* waiter, int32_t* retval);
+ void SignalAsyncWaiter(int32_t retval);
+
+ base::ThreadChecker thread_checker_;
+
+ // Weak pointer to the parent RTCVideoEncoder, for posting back VEA::Client
+ // notifications.
+ const base::WeakPtr<RTCVideoEncoder> weak_encoder_;
+
+ // The message loop on which to post callbacks to |weak_encoder_|.
+ const scoped_refptr<base::MessageLoopProxy> encoder_message_loop_proxy_;
+
+ // Factory for creating VEAs, shared memory buffers, etc.
+ const scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories_;
+
+ // webrtc::VideoEncoder expects InitEncode() and Encode() to be synchronous.
+ // Do this by waiting on the |async_waiter_| and returning the return value in
+ // |async_retval_| when initialization completes, encoding completes, or
+ // an error occurs.
+ base::WaitableEvent* async_waiter_;
+ int32_t* async_retval_;
+
+ // The underlying VEA to perform encoding on.
+ scoped_ptr<media::VideoEncodeAccelerator> video_encoder_;
+
+ // Next input frame. Since there is at most one next frame, a single-element
+ // queue is sufficient.
+ const webrtc::I420VideoFrame* input_next_frame_;
+
+ // Whether to encode a keyframe next.
+ bool input_next_frame_keyframe_;
+
+ // Frame sizes.
+ gfx::Size input_frame_coded_size_;
+ gfx::Size input_visible_size_;
+
+ // Shared memory buffers for input/output with the VEA.
+ ScopedVector<base::SharedMemory> input_buffers_;
+ ScopedVector<base::SharedMemory> output_buffers_;
+
+ // Input buffers ready to be filled with input from Encode(). As a LIFO since
+ // we don't care about ordering.
+ std::vector<int> input_buffers_free_;
+
+ // Timestamp of first frame returned from encoder. We calculate subsequent
+ // capture times as deltas from this base.
+ base::Time time_base_;
+
+ DISALLOW_COPY_AND_ASSIGN(Impl);
+};
+
+RTCVideoEncoder::Impl::Impl(
+ const base::WeakPtr<RTCVideoEncoder>& weak_encoder,
+ const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories)
+ : weak_encoder_(weak_encoder),
+ encoder_message_loop_proxy_(base::MessageLoopProxy::current()),
+ gpu_factories_(gpu_factories),
+ async_waiter_(NULL),
+ async_retval_(NULL),
+ input_next_frame_(NULL),
+ input_next_frame_keyframe_(false) {
+ thread_checker_.DetachFromThread();
+}
+
+void RTCVideoEncoder::Impl::CreateAndInitializeVEA(
+ const gfx::Size& input_visible_size,
+ uint32 bitrate,
+ media::VideoCodecProfile profile,
+ base::WaitableEvent* async_waiter,
+ int32_t* async_retval) {
+ DVLOG(3) << "Impl::CreateAndInitializeVEA()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ RegisterAsyncWaiter(async_waiter, async_retval);
+
+ // Check for overflow converting bitrate (kilobits/sec) to bits/sec.
+ if (bitrate > kuint32max / 1000) {
+ NOTIFY_ERROR(media::VideoEncodeAccelerator::kInvalidArgumentError);
+ return;
+ }
+
+ video_encoder_ = gpu_factories_->CreateVideoEncodeAccelerator(this).Pass();
+ if (!video_encoder_) {
+ NOTIFY_ERROR(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+ input_visible_size_ = input_visible_size;
+ video_encoder_->Initialize(
+ media::VideoFrame::I420, input_visible_size_, profile, bitrate * 1000);
+}
+
+void RTCVideoEncoder::Impl::Enqueue(const webrtc::I420VideoFrame* input_frame,
+ bool force_keyframe,
+ base::WaitableEvent* async_waiter,
+ int32_t* async_retval) {
+ DVLOG(3) << "Impl::Enqueue()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!input_next_frame_);
+
+ RegisterAsyncWaiter(async_waiter, async_retval);
+ input_next_frame_ = input_frame;
+ input_next_frame_keyframe_ = force_keyframe;
+
+ if (!input_buffers_free_.empty())
+ EncodeOneFrame();
+}
+
+void RTCVideoEncoder::Impl::UseOutputBitstreamBufferId(
+ int32 bitstream_buffer_id) {
+ DVLOG(3) << "Impl::UseOutputBitstreamBufferIndex(): "
+ "bitstream_buffer_id=" << bitstream_buffer_id;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (video_encoder_) {
+ video_encoder_->UseOutputBitstreamBuffer(media::BitstreamBuffer(
+ bitstream_buffer_id,
+ output_buffers_[bitstream_buffer_id]->handle(),
+ output_buffers_[bitstream_buffer_id]->mapped_size()));
+ }
+}
+
+void RTCVideoEncoder::Impl::RequestEncodingParametersChange(uint32 bitrate,
+ uint32 framerate) {
+ DVLOG(3) << "Impl::RequestEncodingParametersChange(): bitrate=" << bitrate
+ << ", framerate=" << framerate;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Check for overflow converting bitrate (kilobits/sec) to bits/sec.
+ if (bitrate > kuint32max / 1000) {
+ NOTIFY_ERROR(media::VideoEncodeAccelerator::kInvalidArgumentError);
+ return;
+ }
+
+ if (video_encoder_)
+ video_encoder_->RequestEncodingParametersChange(bitrate * 1000, framerate);
+}
+
+void RTCVideoEncoder::Impl::Destroy() {
+ DVLOG(3) << "Impl::Destroy()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (video_encoder_)
+ video_encoder_.release()->Destroy();
+}
+
+void RTCVideoEncoder::Impl::NotifyInitializeDone() {
+ DVLOG(3) << "Impl::NotifyInitializeDone()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ SignalAsyncWaiter(WEBRTC_VIDEO_CODEC_OK);
+}
+
+void RTCVideoEncoder::Impl::RequireBitstreamBuffers(
+ unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) {
+ DVLOG(3) << "Impl::RequireBitstreamBuffers(): input_count=" << input_count
+ << ", input_coded_size=" << input_coded_size.ToString()
+ << ", output_buffer_size=" << output_buffer_size;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (!video_encoder_)
+ return;
+
+ input_frame_coded_size_ = input_coded_size;
+
+ for (unsigned int i = 0; i < input_count + kInputBufferExtraCount; ++i) {
+ base::SharedMemory* shm =
+ gpu_factories_->CreateSharedMemory(input_coded_size.GetArea() * 3 / 2);
+ if (!shm) {
+ DLOG(ERROR) << "Impl::RequireBitstreamBuffers(): "
+ "failed to create input buffer " << i;
+ NOTIFY_ERROR(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+ input_buffers_.push_back(shm);
+ input_buffers_free_.push_back(i);
+ }
+
+ for (int i = 0; i < kOutputBufferCount; ++i) {
+ base::SharedMemory* shm =
+ gpu_factories_->CreateSharedMemory(output_buffer_size);
+ if (!shm) {
+ DLOG(ERROR) << "Impl::RequireBitstreamBuffers(): "
+ "failed to create output buffer " << i;
+ NOTIFY_ERROR(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+ output_buffers_.push_back(shm);
+ }
+
+ // Immediately provide all output buffers to the VEA.
+ for (size_t i = 0; i < output_buffers_.size(); ++i) {
+ video_encoder_->UseOutputBitstreamBuffer(media::BitstreamBuffer(
+ i, output_buffers_[i]->handle(), output_buffers_[i]->mapped_size()));
+ }
+}
+
+void RTCVideoEncoder::Impl::BitstreamBufferReady(int32 bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame) {
+ DVLOG(3) << "Impl::BitstreamBufferReady(): "
+ "bitstream_buffer_id=" << bitstream_buffer_id
+ << ", payload_size=" << payload_size
+ << ", key_frame=" << key_frame;
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (bitstream_buffer_id < 0 ||
+ bitstream_buffer_id >= static_cast<int>(output_buffers_.size())) {
+ DLOG(ERROR) << "Impl::BitstreamBufferReady(): invalid bitstream_buffer_id="
+ << bitstream_buffer_id;
+ NOTIFY_ERROR(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+ base::SharedMemory* output_buffer = output_buffers_[bitstream_buffer_id];
+ if (payload_size > output_buffer->mapped_size()) {
+ DLOG(ERROR) << "Impl::BitstreamBufferReady(): invalid payload_size="
+ << payload_size;
+ NOTIFY_ERROR(media::VideoEncodeAccelerator::kPlatformFailureError);
+ return;
+ }
+
+ const base::Time now = base::Time::Now();
+ if (time_base_.is_null())
+ time_base_ = now;
+ const base::TimeDelta delta = now - time_base_;
+
+ scoped_ptr<webrtc::EncodedImage> image(new webrtc::EncodedImage(
+ reinterpret_cast<uint8_t*>(output_buffer->memory()),
+ payload_size,
+ output_buffer->mapped_size()));
+ image->_encodedWidth = input_visible_size_.width();
+ image->_encodedHeight = input_visible_size_.height();
+ // Convert capture time to 90 kHz RTP timestamp.
+ image->_timeStamp = (delta * 90000).InSeconds();
+ image->capture_time_ms_ = delta.InMilliseconds();
+ image->_frameType = (key_frame ? webrtc::kKeyFrame : webrtc::kDeltaFrame);
+ image->_completeFrame = true;
+
+ encoder_message_loop_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoEncoder::ReturnEncodedImage,
+ weak_encoder_,
+ base::Passed(&image),
+ bitstream_buffer_id));
+}
+
+void RTCVideoEncoder::Impl::NotifyError(
+ media::VideoEncodeAccelerator::Error error) {
+ DVLOG(3) << "Impl::NotifyError(): error=" << error;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ int32_t retval;
+ switch (error) {
+ case media::VideoEncodeAccelerator::kInvalidArgumentError:
+ retval = WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ break;
+ default:
+ retval = WEBRTC_VIDEO_CODEC_ERROR;
+ }
+
+ if (video_encoder_)
+ video_encoder_.release()->Destroy();
+
+ if (async_waiter_) {
+ SignalAsyncWaiter(retval);
+ } else {
+ encoder_message_loop_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoEncoder::NotifyError, weak_encoder_, retval));
+ }
+}
+
+RTCVideoEncoder::Impl::~Impl() { DCHECK(!video_encoder_); }
+
+void RTCVideoEncoder::Impl::EncodeOneFrame() {
+ DVLOG(3) << "Impl::EncodeOneFrame()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(input_next_frame_);
+ DCHECK(!input_buffers_free_.empty());
+
+ // EncodeOneFrame() may re-enter EncodeFrameFinished() if VEA::Encode() fails,
+ // we receive a VEA::NotifyError(), and the media::VideoFrame we pass to
+ // Encode() gets destroyed early. Handle this by resetting our
+ // input_next_frame_* state before we hand off the VideoFrame to the VEA.
+ const webrtc::I420VideoFrame* next_frame = input_next_frame_;
+ bool next_frame_keyframe = input_next_frame_keyframe_;
+ input_next_frame_ = NULL;
+ input_next_frame_keyframe_ = false;
+
+ if (!video_encoder_) {
+ SignalAsyncWaiter(WEBRTC_VIDEO_CODEC_ERROR);
+ return;
+ }
+
+ const int index = input_buffers_free_.back();
+ base::SharedMemory* input_buffer = input_buffers_[index];
+
+ // Do a strided copy of the input frame to match the input requirements for
+ // the encoder.
+ // TODO(sheu): support zero-copy from WebRTC. http://crbug.com/269312
+ const uint8_t* src = next_frame->buffer(webrtc::kYPlane);
+ uint8* dst = reinterpret_cast<uint8*>(input_buffer->memory());
+ uint8* const y_dst = dst;
+ int width = input_frame_coded_size_.width();
+ int stride = next_frame->stride(webrtc::kYPlane);
+ for (int i = 0; i < next_frame->height(); ++i) {
+ memcpy(dst, src, width);
+ src += stride;
+ dst += width;
+ }
+ src = next_frame->buffer(webrtc::kUPlane);
+ width = input_frame_coded_size_.width() / 2;
+ stride = next_frame->stride(webrtc::kUPlane);
+ for (int i = 0; i < next_frame->height() / 2; ++i) {
+ memcpy(dst, src, width);
+ src += stride;
+ dst += width;
+ }
+ src = next_frame->buffer(webrtc::kVPlane);
+ width = input_frame_coded_size_.width() / 2;
+ stride = next_frame->stride(webrtc::kVPlane);
+ for (int i = 0; i < next_frame->height() / 2; ++i) {
+ memcpy(dst, src, width);
+ src += stride;
+ dst += width;
+ }
+
+ scoped_refptr<media::VideoFrame> frame =
+ media::VideoFrame::WrapExternalSharedMemory(
+ media::VideoFrame::I420,
+ input_frame_coded_size_,
+ gfx::Rect(input_visible_size_),
+ input_visible_size_,
+ y_dst,
+ input_buffer->handle(),
+ base::TimeDelta(),
+ base::Bind(&RTCVideoEncoder::Impl::EncodeFrameFinished, this, index));
+
+ video_encoder_->Encode(frame, next_frame_keyframe);
+ input_buffers_free_.pop_back();
+ SignalAsyncWaiter(WEBRTC_VIDEO_CODEC_OK);
+}
+
+void RTCVideoEncoder::Impl::EncodeFrameFinished(int index) {
+ DVLOG(3) << "Impl::EncodeFrameFinished(): index=" << index;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, static_cast<int>(input_buffers_.size()));
+ input_buffers_free_.push_back(index);
+ if (input_next_frame_)
+ EncodeOneFrame();
+}
+
+void RTCVideoEncoder::Impl::RegisterAsyncWaiter(base::WaitableEvent* waiter,
+ int32_t* retval) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!async_waiter_);
+ DCHECK(!async_retval_);
+ async_waiter_ = waiter;
+ async_retval_ = retval;
+}
+
+void RTCVideoEncoder::Impl::SignalAsyncWaiter(int32_t retval) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ *async_retval_ = retval;
+ async_waiter_->Signal();
+ async_retval_ = NULL;
+ async_waiter_ = NULL;
+}
+
+#undef NOTIFY_ERROR
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// RTCVideoEncoder
+//
+////////////////////////////////////////////////////////////////////////////////
+
+RTCVideoEncoder::RTCVideoEncoder(
+ webrtc::VideoCodecType type,
+ media::VideoCodecProfile profile,
+ const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories)
+ : video_codec_type_(type),
+ video_codec_profile_(profile),
+ gpu_factories_(gpu_factories),
+ encoded_image_callback_(NULL),
+ impl_status_(WEBRTC_VIDEO_CODEC_UNINITIALIZED) {
+ DVLOG(1) << "RTCVideoEncoder(): profile=" << profile;
+}
+
+RTCVideoEncoder::~RTCVideoEncoder() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ Release();
+ DCHECK(!impl_);
+}
+
+int32_t RTCVideoEncoder::InitEncode(const webrtc::VideoCodec* codec_settings,
+ int32_t number_of_cores,
+ uint32_t max_payload_size) {
+ DVLOG(1) << "InitEncode(): codecType=" << codec_settings->codecType
+ << ", width=" << codec_settings->width
+ << ", height=" << codec_settings->height
+ << ", startBitrate=" << codec_settings->startBitrate;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!impl_);
+
+ weak_this_factory_.reset(new base::WeakPtrFactory<RTCVideoEncoder>(this));
+ impl_ = new Impl(weak_this_factory_->GetWeakPtr(), gpu_factories_);
+ base::WaitableEvent initialization_waiter(true, false);
+ int32_t initialization_retval = WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ gpu_factories_->GetMessageLoop()->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoEncoder::Impl::CreateAndInitializeVEA,
+ impl_,
+ gfx::Size(codec_settings->width, codec_settings->height),
+ codec_settings->startBitrate,
+ video_codec_profile_,
+ &initialization_waiter,
+ &initialization_retval));
+
+ // webrtc::VideoEncoder expects this call to be synchronous.
+ initialization_waiter.Wait();
+ return initialization_retval;
+}
+
+int32_t RTCVideoEncoder::Encode(
+ const webrtc::I420VideoFrame& input_image,
+ const webrtc::CodecSpecificInfo* codec_specific_info,
+ const std::vector<webrtc::VideoFrameType>* frame_types) {
+ DVLOG(3) << "Encode()";
+ // TODO(sheu): figure out why this check fails.
+ // DCHECK(thread_checker_.CalledOnValidThread());
+ if (!impl_) {
+ DVLOG(3) << "Encode(): returning impl_status_=" << impl_status_;
+ return impl_status_;
+ }
+
+ base::WaitableEvent encode_waiter(true, false);
+ int32_t encode_retval = WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ gpu_factories_->GetMessageLoop()->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoEncoder::Impl::Enqueue,
+ impl_,
+ &input_image,
+ (frame_types->front() == webrtc::kKeyFrame),
+ &encode_waiter,
+ &encode_retval));
+
+ // webrtc::VideoEncoder expects this call to be synchronous.
+ encode_waiter.Wait();
+ DVLOG(3) << "Encode(): returning encode_retval=" << encode_retval;
+ return encode_retval;
+}
+
+int32_t RTCVideoEncoder::RegisterEncodeCompleteCallback(
+ webrtc::EncodedImageCallback* callback) {
+ DVLOG(3) << "RegisterEncodeCompleteCallback()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!impl_) {
+ DVLOG(3) << "RegisterEncodeCompleteCallback(): returning " << impl_status_;
+ return impl_status_;
+ }
+
+ encoded_image_callback_ = callback;
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t RTCVideoEncoder::Release() {
+ DVLOG(3) << "Release()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Reset the gpu_factory_, in case we reuse this encoder.
+ gpu_factories_->Abort();
+ gpu_factories_ = gpu_factories_->Clone();
+ if (impl_) {
+ gpu_factories_->GetMessageLoop()->PostTask(
+ FROM_HERE, base::Bind(&RTCVideoEncoder::Impl::Destroy, impl_));
+ impl_ = NULL;
+ weak_this_factory_.reset();
+ impl_status_ = WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t RTCVideoEncoder::SetChannelParameters(uint32_t packet_loss, int rtt) {
+ DVLOG(3) << "SetChannelParameters(): packet_loss=" << packet_loss
+ << ", rtt=" << rtt;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Ignored.
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t RTCVideoEncoder::SetRates(uint32_t new_bit_rate, uint32_t frame_rate) {
+ DVLOG(3) << "SetRates(): new_bit_rate=" << new_bit_rate
+ << ", frame_rate=" << frame_rate;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!impl_) {
+ DVLOG(3) << "SetRates(): returning " << impl_status_;
+ return impl_status_;
+ }
+
+ gpu_factories_->GetMessageLoop()->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoEncoder::Impl::RequestEncodingParametersChange,
+ impl_,
+ new_bit_rate,
+ frame_rate));
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+void RTCVideoEncoder::ReturnEncodedImage(scoped_ptr<webrtc::EncodedImage> image,
+ int32 bitstream_buffer_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(3) << "ReturnEncodedImage(): "
+ "bitstream_buffer_id=" << bitstream_buffer_id;
+
+ if (!encoded_image_callback_)
+ return;
+
+ webrtc::CodecSpecificInfo info;
+ info.codecType = video_codec_type_;
+
+ // Generate a header describing a single fragment.
+ webrtc::RTPFragmentationHeader header;
+ header.VerifyAndAllocateFragmentationHeader(1);
+ header.fragmentationOffset[0] = 0;
+ header.fragmentationLength[0] = image->_length;
+ header.fragmentationPlType[0] = 0;
+ header.fragmentationTimeDiff[0] = 0;
+
+ int32_t retval = encoded_image_callback_->Encoded(*image, &info, &header);
+ if (retval < 0) {
+ DVLOG(2) << "ReturnEncodedImage(): encoded_image_callback_ returned "
+ << retval;
+ }
+
+ // The call through webrtc::EncodedImageCallback is synchronous, so we can
+ // immediately recycle the output buffer back to the Impl.
+ gpu_factories_->GetMessageLoop()->PostTask(
+ FROM_HERE,
+ base::Bind(&RTCVideoEncoder::Impl::UseOutputBitstreamBufferId,
+ impl_,
+ bitstream_buffer_id));
+}
+
+void RTCVideoEncoder::NotifyError(int32_t error) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(1) << "NotifyError(): error=" << error;
+
+ impl_status_ = error;
+ gpu_factories_->GetMessageLoop()->PostTask(
+ FROM_HERE, base::Bind(&RTCVideoEncoder::Impl::Destroy, impl_));
+ impl_ = NULL;
+}
+
+} // namespace content
diff --git a/content/renderer/media/rtc_video_encoder.h b/content/renderer/media/rtc_video_encoder.h
new file mode 100644
index 0000000..22d4c50
--- /dev/null
+++ b/content/renderer/media/rtc_video_encoder.h
@@ -0,0 +1,104 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_RTC_VIDEO_ENCODER_H_
+#define CONTENT_RENDERER_MEDIA_RTC_VIDEO_ENCODER_H_
+
+#include <vector>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "content/common/content_export.h"
+#include "media/base/video_decoder_config.h"
+#include "third_party/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "ui/gfx/size.h"
+
+namespace base {
+
+class MessageLoopProxy;
+
+} // namespace base
+
+namespace content {
+
+class RendererGpuVideoAcceleratorFactories;
+
+// RTCVideoEncoder uses a media::VideoEncodeAccelerator to implement a
+// webrtc::VideoEncoder class for WebRTC. Internally, VEA methods are
+// trampolined to a private RTCVideoEncoder::Impl instance. The Impl class runs
+// on the worker thread queried from the |gpu_factories_|, which is presently
+// the media thread. RTCVideoEncoder itself is run and destroyed on the thread
+// it is constructed on, which is presently the libjingle worker thread.
+// Callbacks from the Impl due to its VEA::Client notifications are also posted
+// back to RTCVideoEncoder on this thread.
+class CONTENT_EXPORT RTCVideoEncoder
+ : NON_EXPORTED_BASE(public webrtc::VideoEncoder) {
+ public:
+ RTCVideoEncoder(
+ webrtc::VideoCodecType type,
+ media::VideoCodecProfile profile,
+ const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories);
+ virtual ~RTCVideoEncoder();
+
+ // webrtc::VideoEncoder implementation. Tasks are posted to |impl_| using the
+ // appropriate VEA methods.
+ virtual int32_t InitEncode(const webrtc::VideoCodec* codec_settings,
+ int32_t number_of_cores,
+ uint32_t max_payload_size) OVERRIDE;
+ virtual int32_t Encode(
+ const webrtc::I420VideoFrame& input_image,
+ const webrtc::CodecSpecificInfo* codec_specific_info,
+ const std::vector<webrtc::VideoFrameType>* frame_types) OVERRIDE;
+ virtual int32_t RegisterEncodeCompleteCallback(
+ webrtc::EncodedImageCallback* callback) OVERRIDE;
+ virtual int32_t Release() OVERRIDE;
+ virtual int32_t SetChannelParameters(uint32_t packet_loss, int rtt) OVERRIDE;
+ virtual int32_t SetRates(uint32_t new_bit_rate, uint32_t frame_rate) OVERRIDE;
+
+ private:
+ class Impl;
+ friend class RTCVideoEncoder::Impl;
+
+ // Return an encoded output buffer to WebRTC.
+ void ReturnEncodedImage(scoped_ptr<webrtc::EncodedImage> image,
+ int32 bitstream_buffer_id);
+
+ void NotifyError(int32_t error);
+
+ base::ThreadChecker thread_checker_;
+
+ // The video codec type, as reported to WebRTC.
+ const webrtc::VideoCodecType video_codec_type_;
+
+ // The video codec profile, to configure the encoder to encode to.
+ const media::VideoCodecProfile video_codec_profile_;
+
+ // Factory for creating VEAs, shared memory buffers, etc.
+ scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories_;
+
+ // Weak pointer and factory for posting back VEA::Client notifications to
+ // RTCVideoEncoder.
+ scoped_ptr<base::WeakPtrFactory<RTCVideoEncoder> > weak_this_factory_;
+
+ // webrtc::VideoEncoder encode complete callback.
+ webrtc::EncodedImageCallback* encoded_image_callback_;
+
+ // The RTCVideoEncoder::Impl that does all the work.
+ scoped_refptr<Impl> impl_;
+
+ // We cannot immediately return error conditions to the WebRTC user of this
+ // class, as there is no error callback in the webrtc::VideoEncoder interface.
+ // Instead, we cache an error status here and return it the next time an
+ // interface entry point is called.
+ int32_t impl_status_;
+
+ DISALLOW_COPY_AND_ASSIGN(RTCVideoEncoder);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_RTC_VIDEO_ENCODER_H_
diff --git a/content/renderer/media/rtc_video_encoder_factory.cc b/content/renderer/media/rtc_video_encoder_factory.cc
new file mode 100644
index 0000000..3ff4272
--- /dev/null
+++ b/content/renderer/media/rtc_video_encoder_factory.cc
@@ -0,0 +1,110 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/rtc_video_encoder_factory.h"
+
+#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
+#include "content/renderer/media/renderer_gpu_video_accelerator_factories.h"
+#include "content/renderer/media/rtc_video_encoder.h"
+#include "media/video/video_encode_accelerator.h"
+
+namespace content {
+
+namespace {
+
+// Translate from media::VideoEncodeAccelerator::SupportedProfile to
+// cricket::WebRtcVideoEncoderFactory::VideoCodec
+cricket::WebRtcVideoEncoderFactory::VideoCodec VEAToWebRTCCodec(
+ const media::VideoEncodeAccelerator::SupportedProfile& profile) {
+ webrtc::VideoCodecType type = webrtc::kVideoCodecUnknown;
+ std::string name;
+ int width = 0, height = 0, fps = 0;
+
+ if (profile.profile >= media::VP8PROFILE_MIN &&
+ profile.profile <= media::VP8PROFILE_MAX) {
+ type = webrtc::kVideoCodecVP8;
+ name = "VP8";
+ } else if (profile.profile >= media::H264PROFILE_MIN &&
+ profile.profile <= media::H264PROFILE_MAX) {
+ type = webrtc::kVideoCodecGeneric;
+ name = "CAST1";
+ }
+
+ if (type != webrtc::kVideoCodecUnknown) {
+ width = profile.max_resolution.width();
+ height = profile.max_resolution.height();
+ fps = profile.max_framerate.numerator;
+ DCHECK_EQ(profile.max_framerate.denominator, 1U);
+ }
+
+ return cricket::WebRtcVideoEncoderFactory::VideoCodec(
+ type, name, width, height, fps);
+}
+
+// Translate from cricket::WebRtcVideoEncoderFactory::VideoCodec to
+// media::VideoCodecProfile. Pick a default profile for each codec type.
+media::VideoCodecProfile WebRTCCodecToVideoCodecProfile(
+ webrtc::VideoCodecType type) {
+ switch (type) {
+ case webrtc::kVideoCodecVP8:
+ return media::VP8PROFILE_MAIN;
+ case webrtc::kVideoCodecGeneric:
+ return media::H264PROFILE_MAIN;
+ default:
+ return media::VIDEO_CODEC_PROFILE_UNKNOWN;
+ }
+}
+
+} // anonymous namespace
+
+RTCVideoEncoderFactory::RTCVideoEncoderFactory(
+ const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories)
+ : gpu_factories_(gpu_factories) {
+ // Query media::VideoEncodeAccelerator (statically) for our supported codecs.
+ std::vector<media::VideoEncodeAccelerator::SupportedProfile> profiles =
+ GpuVideoEncodeAcceleratorHost::GetSupportedProfiles();
+ for (size_t i = 0; i < profiles.size(); ++i) {
+ VideoCodec codec = VEAToWebRTCCodec(profiles[i]);
+ if (codec.type != webrtc::kVideoCodecUnknown)
+ codecs_.push_back(codec);
+ }
+}
+
+RTCVideoEncoderFactory::~RTCVideoEncoderFactory() {}
+
+webrtc::VideoEncoder* RTCVideoEncoderFactory::CreateVideoEncoder(
+ webrtc::VideoCodecType type) {
+ bool found = false;
+ for (size_t i = 0; i < codecs_.size(); ++i) {
+ if (codecs_[i].type == type) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return NULL;
+ // GpuVideoAcceleratorFactories is not thread safe. It cannot be shared
+ // by different encoders. Since we aren't running on the child thread and
+ // cannot create a new factory, clone one instead.
+ return new RTCVideoEncoder(
+ type, WebRTCCodecToVideoCodecProfile(type), gpu_factories_->Clone());
+}
+
+void RTCVideoEncoderFactory::AddObserver(Observer* observer) {
+ // No-op: our codec list is populated on installation.
+}
+
+void RTCVideoEncoderFactory::RemoveObserver(Observer* observer) {}
+
+const std::vector<cricket::WebRtcVideoEncoderFactory::VideoCodec>&
+RTCVideoEncoderFactory::codecs() const {
+ return codecs_;
+}
+
+void RTCVideoEncoderFactory::DestroyVideoEncoder(
+ webrtc::VideoEncoder* encoder) {
+ delete encoder;
+}
+
+} // namespace content
diff --git a/content/renderer/media/rtc_video_encoder_factory.h b/content/renderer/media/rtc_video_encoder_factory.h
new file mode 100644
index 0000000..b07ccda
--- /dev/null
+++ b/content/renderer/media/rtc_video_encoder_factory.h
@@ -0,0 +1,48 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_RTC_VIDEO_ENCODER_FACTORY_H_
+#define CONTENT_RENDERER_MEDIA_RTC_VIDEO_ENCODER_FACTORY_H_
+
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "content/common/content_export.h"
+#include "third_party/libjingle/source/talk/media/webrtc/webrtcvideoencoderfactory.h"
+
+namespace content {
+
+class RendererGpuVideoAcceleratorFactories;
+
+// This class creates RTCVideoEncoder instances (each wrapping a
+// media::VideoEncodeAccelerator) on behalf of the WebRTC stack.
+class CONTENT_EXPORT RTCVideoEncoderFactory
+ : NON_EXPORTED_BASE(public cricket::WebRtcVideoEncoderFactory) {
+ public:
+ explicit RTCVideoEncoderFactory(
+ const scoped_refptr<RendererGpuVideoAcceleratorFactories>& gpu_factories);
+ virtual ~RTCVideoEncoderFactory();
+
+ // cricket::WebRtcVideoEncoderFactory implementation.
+ virtual webrtc::VideoEncoder* CreateVideoEncoder(
+ webrtc::VideoCodecType type) OVERRIDE;
+ virtual void AddObserver(Observer* observer) OVERRIDE;
+ virtual void RemoveObserver(Observer* observer) OVERRIDE;
+ virtual const std::vector<VideoCodec>& codecs() const OVERRIDE;
+ virtual void DestroyVideoEncoder(webrtc::VideoEncoder* encoder) OVERRIDE;
+
+ private:
+ const scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories_;
+
+ // Codec support list of cricket::WebRtcVideoEncoderFactory::VideoCodec
+ // instances.
+ std::vector<VideoCodec> codecs_;
+
+ DISALLOW_COPY_AND_ASSIGN(RTCVideoEncoderFactory);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_RTC_VIDEO_ENCODER_FACTORY_H_
diff --git a/content/renderer/media/webmediaplayer_impl.cc b/content/renderer/media/webmediaplayer_impl.cc
index 227d0cf..d478097 100644
--- a/content/renderer/media/webmediaplayer_impl.cc
+++ b/content/renderer/media/webmediaplayer_impl.cc
@@ -43,8 +43,8 @@
#include "media/filters/ffmpeg_audio_decoder.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/ffmpeg_video_decoder.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
#include "media/filters/gpu_video_decoder.h"
-#include "media/filters/gpu_video_decoder_factories.h"
#include "media/filters/opus_audio_decoder.h"
#include "media/filters/video_renderer_base.h"
#include "media/filters/vpx_video_decoder.h"
diff --git a/content/renderer/media/webmediaplayer_impl.h b/content/renderer/media/webmediaplayer_impl.h
index 23b1ddf52..c386b08 100644
--- a/content/renderer/media/webmediaplayer_impl.h
+++ b/content/renderer/media/webmediaplayer_impl.h
@@ -56,7 +56,7 @@ class MessageLoopProxy;
namespace media {
class ChunkDemuxer;
class FFmpegDemuxer;
-class GpuVideoDecoderFactories;
+class GpuVideoAcceleratorFactories;
class MediaLog;
}
@@ -318,8 +318,8 @@ class WebMediaPlayerImpl
bool incremented_externally_allocated_memory_;
- // Factories for supporting GpuVideoDecoder. May be null.
- scoped_refptr<media::GpuVideoDecoderFactories> gpu_factories_;
+ // Factories for supporting video accelerators. May be null.
+ scoped_refptr<media::GpuVideoAcceleratorFactories> gpu_factories_;
// Routes audio playback to either AudioRendererSink or WebAudio.
scoped_refptr<WebAudioSourceProviderImpl> audio_source_provider_;
diff --git a/content/renderer/media/webmediaplayer_params.cc b/content/renderer/media/webmediaplayer_params.cc
index 04fe310..a05abbf 100644
--- a/content/renderer/media/webmediaplayer_params.cc
+++ b/content/renderer/media/webmediaplayer_params.cc
@@ -7,7 +7,7 @@
#include "base/message_loop/message_loop_proxy.h"
#include "media/base/audio_renderer_sink.h"
#include "media/base/media_log.h"
-#include "media/filters/gpu_video_decoder_factories.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
namespace content {
@@ -15,7 +15,7 @@ WebMediaPlayerParams::WebMediaPlayerParams(
const scoped_refptr<base::MessageLoopProxy>& message_loop_proxy,
const base::Callback<void(const base::Closure&)>& defer_load_cb,
const scoped_refptr<media::AudioRendererSink>& audio_renderer_sink,
- const scoped_refptr<media::GpuVideoDecoderFactories>& gpu_factories,
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories,
const scoped_refptr<media::MediaLog>& media_log)
: message_loop_proxy_(message_loop_proxy),
defer_load_cb_(defer_load_cb),
diff --git a/content/renderer/media/webmediaplayer_params.h b/content/renderer/media/webmediaplayer_params.h
index 4347a4a..bf39864 100644
--- a/content/renderer/media/webmediaplayer_params.h
+++ b/content/renderer/media/webmediaplayer_params.h
@@ -14,7 +14,7 @@ class MessageLoopProxy;
namespace media {
class AudioRendererSink;
-class GpuVideoDecoderFactories;
+class GpuVideoAcceleratorFactories;
class MediaLog;
}
@@ -30,7 +30,7 @@ class WebMediaPlayerParams {
const scoped_refptr<base::MessageLoopProxy>& message_loop_proxy,
const base::Callback<void(const base::Closure&)>& defer_load_cb,
const scoped_refptr<media::AudioRendererSink>& audio_renderer_sink,
- const scoped_refptr<media::GpuVideoDecoderFactories>& gpu_factories,
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories,
const scoped_refptr<media::MediaLog>& media_log);
~WebMediaPlayerParams();
@@ -46,7 +46,8 @@ class WebMediaPlayerParams {
return audio_renderer_sink_;
}
- const scoped_refptr<media::GpuVideoDecoderFactories>& gpu_factories() const {
+ const scoped_refptr<media::GpuVideoAcceleratorFactories>& gpu_factories()
+ const {
return gpu_factories_;
}
@@ -58,7 +59,7 @@ class WebMediaPlayerParams {
scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
base::Callback<void(const base::Closure&)> defer_load_cb_;
scoped_refptr<media::AudioRendererSink> audio_renderer_sink_;
- scoped_refptr<media::GpuVideoDecoderFactories> gpu_factories_;
+ scoped_refptr<media::GpuVideoAcceleratorFactories> gpu_factories_;
scoped_refptr<media::MediaLog> media_log_;
DISALLOW_IMPLICIT_CONSTRUCTORS(WebMediaPlayerParams);
diff --git a/content/renderer/render_thread_impl.cc b/content/renderer/render_thread_impl.cc
index b1e4a71..f15738d 100644
--- a/content/renderer/render_thread_impl.cc
+++ b/content/renderer/render_thread_impl.cc
@@ -87,7 +87,7 @@
#include "ipc/ipc_platform_file.h"
#include "media/base/audio_hardware_config.h"
#include "media/base/media.h"
-#include "media/filters/gpu_video_decoder_factories.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "third_party/WebKit/public/platform/WebString.h"
@@ -887,22 +887,20 @@ void RenderThreadImpl::PostponeIdleNotification() {
idle_notifications_to_skip_ = 2;
}
-scoped_refptr<RendererGpuVideoDecoderFactories>
+scoped_refptr<RendererGpuVideoAcceleratorFactories>
RenderThreadImpl::GetGpuFactories(
const scoped_refptr<base::MessageLoopProxy>& factories_loop) {
DCHECK(IsMainThread());
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- scoped_refptr<RendererGpuVideoDecoderFactories> gpu_factories;
+ scoped_refptr<RendererGpuVideoAcceleratorFactories> gpu_factories;
WebGraphicsContext3DCommandBufferImpl* context3d = NULL;
if (!cmd_line->HasSwitch(switches::kDisableAcceleratedVideoDecode))
context3d = GetGpuVDAContext3D();
- if (context3d) {
- GpuChannelHost* gpu_channel_host = GetGpuChannel();
- if (gpu_channel_host) {
- gpu_factories = new RendererGpuVideoDecoderFactories(
- gpu_channel_host, factories_loop, context3d);
- }
+ GpuChannelHost* gpu_channel_host = GetGpuChannel();
+ if (gpu_channel_host) {
+ gpu_factories = new RendererGpuVideoAcceleratorFactories(
+ gpu_channel_host, factories_loop, context3d);
}
return gpu_factories;
}
diff --git a/content/renderer/render_thread_impl.h b/content/renderer/render_thread_impl.h
index fd2cfff..d11a498 100644
--- a/content/renderer/render_thread_impl.h
+++ b/content/renderer/render_thread_impl.h
@@ -19,7 +19,7 @@
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/gpu_process_launch_causes.h"
#include "content/public/renderer/render_thread.h"
-#include "content/renderer/media/renderer_gpu_video_decoder_factories.h"
+#include "content/renderer/media/renderer_gpu_video_accelerator_factories.h"
#include "ipc/ipc_channel_proxy.h"
#include "ui/gfx/native_widget_types.h"
@@ -55,7 +55,6 @@ class ForwardingMessageFilter;
namespace media {
class AudioHardwareConfig;
-class GpuVideoDecoderFactories;
}
namespace v8 {
@@ -258,9 +257,8 @@ class CONTENT_EXPORT RenderThreadImpl : public RenderThread,
// not sent for at least one notification delay.
void PostponeIdleNotification();
- // Gets gpu factories, which will run on |factories_loop|. Returns NULL if VDA
- // is disabled or a graphics context cannot be obtained.
- scoped_refptr<RendererGpuVideoDecoderFactories> GetGpuFactories(
+ // Gets gpu factories, which will run on |factories_loop|.
+ scoped_refptr<RendererGpuVideoAcceleratorFactories> GetGpuFactories(
const scoped_refptr<base::MessageLoopProxy>& factories_loop);
// Returns a graphics context shared among all
diff --git a/content/renderer/render_view_impl.cc b/content/renderer/render_view_impl.cc
index 2f57b82..8c7eb6f 100644
--- a/content/renderer/render_view_impl.cc
+++ b/content/renderer/render_view_impl.cc
@@ -132,7 +132,7 @@
#include "media/base/filter_collection.h"
#include "media/base/media_switches.h"
#include "media/filters/audio_renderer_impl.h"
-#include "media/filters/gpu_video_decoder_factories.h"
+#include "media/filters/gpu_video_accelerator_factories.h"
#include "net/base/data_url.h"
#include "net/base/escape.h"
#include "net/base/net_errors.h"
@@ -3066,7 +3066,7 @@ WebMediaPlayer* RenderViewImpl::createMediaPlayer(
DVLOG(1) << "Using AudioRendererMixerManager-provided sink: " << sink.get();
}
- scoped_refptr<media::GpuVideoDecoderFactories> gpu_factories =
+ scoped_refptr<media::GpuVideoAcceleratorFactories> gpu_factories =
RenderThreadImpl::current()->GetGpuFactories(
RenderThreadImpl::current()->GetMediaThreadMessageLoopProxy());