summaryrefslogtreecommitdiffstats
path: root/chrome/renderer
diff options
context:
space:
mode:
Diffstat (limited to 'chrome/renderer')
-rw-r--r--chrome/renderer/ggl/ggl.cc24
-rw-r--r--chrome/renderer/ggl/ggl.h20
-rw-r--r--chrome/renderer/gpu_video_decoder_host.cc101
-rw-r--r--chrome/renderer/gpu_video_decoder_host.h83
-rw-r--r--chrome/renderer/media/gles2_video_decode_context.cc6
-rw-r--r--chrome/renderer/media/gles2_video_decode_context.h74
-rw-r--r--chrome/renderer/media/ipc_video_decoder.cc362
-rw-r--r--chrome/renderer/media/ipc_video_decoder.h65
8 files changed, 296 insertions, 439 deletions
diff --git a/chrome/renderer/ggl/ggl.cc b/chrome/renderer/ggl/ggl.cc
index 62453f7..4ae8c9f 100644
--- a/chrome/renderer/ggl/ggl.cc
+++ b/chrome/renderer/ggl/ggl.cc
@@ -12,6 +12,8 @@
#include "chrome/renderer/ggl/ggl.h"
#include "chrome/renderer/gpu_channel_host.h"
#include "chrome/renderer/gpu_video_service_host.h"
+#include "chrome/renderer/media/gles2_video_decode_context.h"
+#include "chrome/renderer/render_thread.h"
#include "chrome/renderer/render_widget.h"
#include "ipc/ipc_channel_handle.h"
@@ -99,7 +101,10 @@ class Context : public base::SupportsWeakPtr<Context> {
bool SwapBuffers();
// Create a hardware accelerated video decoder associated with this context.
- GpuVideoDecoderHost* CreateVideoDecoder();
+ media::VideoDecodeEngine* CreateVideoDecodeEngine();
+
+ // Create a hardware video decode context associated with this context.
+ media::VideoDecodeContext* CreateVideoDecodeContext(bool hardware_decoder);
// Get the current error code.
Error GetError();
@@ -324,11 +329,17 @@ bool Context::SwapBuffers() {
return true;
}
-GpuVideoDecoderHost* Context::CreateVideoDecoder() {
+media::VideoDecodeEngine* Context::CreateVideoDecodeEngine() {
return GpuVideoServiceHost::get()->CreateVideoDecoder(
command_buffer_->route_id());
}
+media::VideoDecodeContext* Context::CreateVideoDecodeContext(
+ bool hardware_decoder) {
+ return new Gles2VideoDecodeContext(
+ RenderThread::current()->message_loop(), hardware_decoder, this);
+}
+
Error Context::GetError() {
gpu::CommandBuffer::State state = command_buffer_->GetState();
if (state.error == gpu::error::kNoError) {
@@ -465,8 +476,13 @@ bool DestroyContext(Context* context) {
#endif
}
-GpuVideoDecoderHost* CreateVideoDecoder(Context* context) {
- return context->CreateVideoDecoder();
+media::VideoDecodeEngine* CreateVideoDecodeEngine(Context* context) {
+ return context->CreateVideoDecodeEngine();
+}
+
+media::VideoDecodeContext* CreateVideoDecodeContext(
+ Context* context, bool hardware_decoder) {
+ return context->CreateVideoDecodeContext(hardware_decoder);
}
Error GetError() {
diff --git a/chrome/renderer/ggl/ggl.h b/chrome/renderer/ggl/ggl.h
index 8d3f0e2..c20e91b 100644
--- a/chrome/renderer/ggl/ggl.h
+++ b/chrome/renderer/ggl/ggl.h
@@ -16,7 +16,13 @@
#include "gfx/size.h"
class GpuChannelHost;
-class GpuVideoDecoderHost;
+
+namespace media {
+
+class VideoDecodeContext;
+class VideoDecodeEngine;
+
+}
namespace ggl {
@@ -115,8 +121,16 @@ bool SwapBuffers(Context* context);
// Destroy the given GGL context.
bool DestroyContext(Context* context);
-// Create a hardware video decoder corresponding to the context.
-GpuVideoDecoderHost* CreateVideoDecoder(Context* context);
+// Create a hardware video decode engine corresponding to the context.
+media::VideoDecodeEngine* CreateVideoDecodeEngine(Context* context);
+
+// Create a hardware video decode context to pair with the hardware video
+// decode engine. It can also be used with a software decode engine.
+//
+// Set |hardware_decoder| to true if this context is for a hardware video
+// engine.
+media::VideoDecodeContext* CreateVideoDecodeContext(Context* context,
+ bool hardware_decoder);
// TODO(gman): Remove this
void DisableShaderTranslation(Context* context);
diff --git a/chrome/renderer/gpu_video_decoder_host.cc b/chrome/renderer/gpu_video_decoder_host.cc
index 130aa32..eda93c6 100644
--- a/chrome/renderer/gpu_video_decoder_host.cc
+++ b/chrome/renderer/gpu_video_decoder_host.cc
@@ -9,21 +9,21 @@
#include "chrome/renderer/render_thread.h"
GpuVideoDecoderHost::GpuVideoDecoderHost(GpuVideoServiceHost* service_host,
- GpuChannelHost* channel_host,
+ IPC::Message::Sender* ipc_sender,
int context_route_id)
: gpu_video_service_host_(service_host),
- channel_host_(channel_host),
+ ipc_sender_(ipc_sender),
context_route_id_(context_route_id),
event_handler_(NULL),
buffer_id_serial_(0),
state_(kStateUninitialized),
input_buffer_busy_(false) {
- memset(&init_param_, 0, sizeof(init_param_));
+ memset(&config_, 0, sizeof(config_));
memset(&done_param_, 0, sizeof(done_param_));
}
void GpuVideoDecoderHost::OnChannelError() {
- channel_host_ = NULL;
+ ipc_sender_ = NULL;
}
void GpuVideoDecoderHost::OnMessageReceived(const IPC::Message& msg) {
@@ -42,48 +42,44 @@ void GpuVideoDecoderHost::OnMessageReceived(const IPC::Message& msg) {
IPC_END_MESSAGE_MAP()
}
-bool GpuVideoDecoderHost::Initialize(EventHandler* event_handler,
- const GpuVideoDecoderInitParam& param) {
+void GpuVideoDecoderHost::Initialize(
+ MessageLoop* message_loop, VideoDecodeEngine::EventHandler* event_handler,
+ media::VideoDecodeContext* context, const media::VideoCodecConfig& config) {
+ // TODO(hclam): Call |event_handler| here.
DCHECK_EQ(state_, kStateUninitialized);
// Save the event handler before we perform initialization operations so
// that we can report initialization events.
event_handler_ = event_handler;
- // TODO(hclam): Pass the context route ID here.
// TODO(hclam): This create video decoder operation is synchronous, need to
// make it asynchronous.
decoder_info_.context_id = context_route_id_;
- if (!channel_host_->Send(
+ if (!ipc_sender_->Send(
new GpuChannelMsg_CreateVideoDecoder(&decoder_info_))) {
LOG(ERROR) << "GpuChannelMsg_CreateVideoDecoder failed";
- return false;
+ return;
}
// Add the route so we'll receive messages.
gpu_video_service_host_->AddRoute(my_route_id(), this);
- init_param_ = param;
- if (!channel_host_ || !channel_host_->Send(
+ // Save the configuration parameters.
+ config_ = config;
+
+ // TODO(hclam): Initialize |param| with the right values.
+ GpuVideoDecoderInitParam param;
+ param.width = config.width;
+ param.height = config.height;
+
+ if (!ipc_sender_ || !ipc_sender_->Send(
new GpuVideoDecoderMsg_Initialize(route_id(), param))) {
LOG(ERROR) << "GpuVideoDecoderMsg_Initialize failed";
- return false;
- }
- return true;
-}
-
-bool GpuVideoDecoderHost::Uninitialize() {
- if (!channel_host_ || !channel_host_->Send(
- new GpuVideoDecoderMsg_Destroy(route_id()))) {
- LOG(ERROR) << "GpuVideoDecoderMsg_Destroy failed";
- return false;
+ return;
}
-
- gpu_video_service_host_->RemoveRoute(my_route_id());
- return true;
}
-void GpuVideoDecoderHost::EmptyThisBuffer(scoped_refptr<Buffer> buffer) {
+void GpuVideoDecoderHost::ConsumeVideoSample(scoped_refptr<Buffer> buffer) {
DCHECK_NE(state_, kStateUninitialized);
DCHECK_NE(state_, kStateFlushing);
@@ -96,7 +92,7 @@ void GpuVideoDecoderHost::EmptyThisBuffer(scoped_refptr<Buffer> buffer) {
SendInputBufferToGpu();
}
-void GpuVideoDecoderHost::FillThisBuffer(scoped_refptr<VideoFrame> frame) {
+void GpuVideoDecoderHost::ProduceVideoFrame(scoped_refptr<VideoFrame> frame) {
DCHECK_NE(state_, kStateUninitialized);
// Depends on who provides buffer. client could return buffer to
@@ -109,19 +105,34 @@ void GpuVideoDecoderHost::FillThisBuffer(scoped_refptr<VideoFrame> frame) {
// This eliminates one conversion step.
}
-bool GpuVideoDecoderHost::Flush() {
+void GpuVideoDecoderHost::Uninitialize() {
+ if (!ipc_sender_ || !ipc_sender_->Send(
+ new GpuVideoDecoderMsg_Destroy(route_id()))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_Destroy failed";
+ return;
+ }
+
+ gpu_video_service_host_->RemoveRoute(my_route_id());
+ return;
+}
+
+void GpuVideoDecoderHost::Flush() {
state_ = kStateFlushing;
- if (!channel_host_ || !channel_host_->Send(
+ if (!ipc_sender_ || !ipc_sender_->Send(
new GpuVideoDecoderMsg_Flush(route_id()))) {
LOG(ERROR) << "GpuVideoDecoderMsg_Flush failed";
- return false;
+ return;
}
input_buffer_queue_.clear();
// TODO(jiesun): because GpuVideoDecoderHost/GpuVideoDecoder are asynchronous.
// We need a way to make flush logic more clear. but I think ring buffer
// should make the busy flag obsolete, therefore I will leave it for now.
input_buffer_busy_ = false;
- return true;
+ return;
+}
+
+void GpuVideoDecoderHost::Seek() {
+ // TODO(hclam): Implement.
}
void GpuVideoDecoderHost::OnInitializeDone(
@@ -144,23 +155,29 @@ void GpuVideoDecoderHost::OnInitializeDone(
} while (0);
state_ = success ? kStateNormal : kStateError;
- event_handler_->OnInitializeDone(success, param);
+
+ media::VideoCodecInfo info;
+ info.success = success;
+ // TODO(hclam): There's too many unnecessary copies for width and height!
+ // Need to clean it up.
+ // TODO(hclam): Need to fill in more information.
+ info.stream_info.surface_width = config_.width;
+ info.stream_info.surface_height = config_.height;
+ event_handler_->OnInitializeComplete(info);
}
void GpuVideoDecoderHost::OnUninitializeDone() {
input_transfer_buffer_.reset();
-
- event_handler_->OnUninitializeDone();
+ event_handler_->OnUninitializeComplete();
}
void GpuVideoDecoderHost::OnFlushDone() {
state_ = kStateNormal;
- event_handler_->OnFlushDone();
+ event_handler_->OnFlushComplete();
}
void GpuVideoDecoderHost::OnEmptyThisBufferDone() {
- scoped_refptr<Buffer> buffer;
- event_handler_->OnEmptyBufferDone(buffer);
+ event_handler_->ProduceVideoSample(NULL);
}
void GpuVideoDecoderHost::OnConsumeVideoFrame(int32 frame_id, int64 timestamp,
@@ -171,16 +188,10 @@ void GpuVideoDecoderHost::OnConsumeVideoFrame(int32 frame_id, int64 timestamp,
VideoFrame::CreateEmptyFrame(&frame);
} else {
// TODO(hclam): Use |frame_id| to find the VideoFrame.
- VideoFrame::GlTexture textures[3] = { 0, 0, 0 };
- media::VideoFrame::CreateFrameGlTexture(
- media::VideoFrame::RGBA, init_param_.width, init_param_.height,
- textures,
- base::TimeDelta::FromMicroseconds(timestamp),
- base::TimeDelta::FromMicroseconds(duration),
- &frame);
}
- event_handler_->OnFillBufferDone(frame);
+ // TODO(hclam): Call the event handler.
+ event_handler_->ConsumeVideoFrame(frame);
}
void GpuVideoDecoderHost::OnEmptyThisBufferACK() {
@@ -204,7 +215,7 @@ void GpuVideoDecoderHost::SendInputBufferToGpu() {
param.size = buffer->GetDataSize();
param.timestamp = buffer->GetTimestamp().InMicroseconds();
memcpy(input_transfer_buffer_->memory(), buffer->GetData(), param.size);
- if (!channel_host_ || !channel_host_->Send(
+ if (!ipc_sender_ || !ipc_sender_->Send(
new GpuVideoDecoderMsg_EmptyThisBuffer(route_id(), param))) {
LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBuffer failed";
}
diff --git a/chrome/renderer/gpu_video_decoder_host.h b/chrome/renderer/gpu_video_decoder_host.h
index cfcdf88..68953f6 100644
--- a/chrome/renderer/gpu_video_decoder_host.h
+++ b/chrome/renderer/gpu_video_decoder_host.h
@@ -10,59 +10,64 @@
#include "base/singleton.h"
#include "chrome/common/gpu_video_common.h"
#include "chrome/renderer/gpu_channel_host.h"
-#include "ipc/ipc_channel_proxy.h"
+#include "ipc/ipc_message.h"
#include "media/base/buffers.h"
#include "media/base/video_frame.h"
+#include "media/video/video_decode_engine.h"
using media::VideoFrame;
using media::Buffer;
class GpuVideoServiceHost;
-class GpuVideoDecoderHost
- : public base::RefCountedThreadSafe<GpuVideoDecoderHost>,
- public IPC::Channel::Listener {
+// This class is used to talk to GpuVideoDecoder in the GPU process through
+// IPC messages. It implements the interface of VideoDecodeEngine so users
+// view it as a regular video decode engine; the implementation is a portal
+// to the GPU process.
+//
+// THREAD SEMANTICS
+//
+// All methods of this class can be accessed on any thread. A message loop
+// needs to be provided to the class through Initialize() for accessing the
+// IPC channel. Event handlers are called on that message loop.
+//
+// Since this class is not refcounted, it is important to delete this
+// object only after OnUninitializeComplete() is called.
+class GpuVideoDecoderHost : public media::VideoDecodeEngine,
+ public IPC::Channel::Listener {
public:
- class EventHandler {
- public:
- virtual void OnInitializeDone(
- bool success,
- const GpuVideoDecoderInitDoneParam& param) = 0;
- virtual void OnUninitializeDone() = 0;
- virtual void OnFlushDone() = 0;
- virtual void OnEmptyBufferDone(scoped_refptr<Buffer> buffer) = 0;
- virtual void OnFillBufferDone(scoped_refptr<VideoFrame> frame) = 0;
- virtual void OnDeviceError() = 0;
- };
-
- typedef enum {
- kStateUninitialized,
- kStateNormal,
- kStateError,
- kStateFlushing,
- } GpuVideoDecoderHostState;
+ virtual ~GpuVideoDecoderHost() {}
// IPC::Channel::Listener.
virtual void OnChannelConnected(int32 peer_pid) {}
virtual void OnChannelError();
virtual void OnMessageReceived(const IPC::Message& message);
- bool Initialize(EventHandler* handler, const GpuVideoDecoderInitParam& param);
- bool Uninitialize();
- void EmptyThisBuffer(scoped_refptr<Buffer> buffer);
- void FillThisBuffer(scoped_refptr<VideoFrame> frame);
- bool Flush();
-
- int32 decoder_id() { return decoder_info_.decoder_id; }
- int32 route_id() { return decoder_info_.decoder_route_id; }
- int32 my_route_id() { return decoder_info_.decoder_host_route_id; }
-
- virtual ~GpuVideoDecoderHost() {}
+ // media::VideoDecodeEngine implementation.
+ virtual void Initialize(MessageLoop* message_loop,
+ VideoDecodeEngine::EventHandler* event_handler,
+ media::VideoDecodeContext* context,
+ const media::VideoCodecConfig& config);
+ virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
+ virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
+ virtual void Uninitialize();
+ virtual void Flush();
+ virtual void Seek();
private:
friend class GpuVideoServiceHost;
+
+ // Internal states.
+ enum GpuVideoDecoderHostState {
+ kStateUninitialized,
+ kStateNormal,
+ kStateError,
+ kStateFlushing,
+ };
+
+ // Private constructor.
GpuVideoDecoderHost(GpuVideoServiceHost* service_host,
- GpuChannelHost* channel_host,
+ IPC::Message::Sender* ipc_sender,
int context_route_id);
// Input message handler.
@@ -77,10 +82,16 @@ class GpuVideoDecoderHost
// Helper function.
void SendInputBufferToGpu();
+ // Getter methods for IDs.
+ int32 decoder_id() { return decoder_info_.decoder_id; }
+ int32 route_id() { return decoder_info_.decoder_route_id; }
+ int32 my_route_id() { return decoder_info_.decoder_host_route_id; }
+
// We expect that GpuVideoServiceHost's always available during our life span.
GpuVideoServiceHost* gpu_video_service_host_;
- GpuChannelHost* channel_host_;
+ // Sends IPC messages to the GPU process.
+ IPC::Message::Sender* ipc_sender_;
// Route ID of the GLES2 context in the GPU process.
int context_route_id_;
@@ -95,7 +106,7 @@ class GpuVideoDecoderHost
int32 buffer_id_serial_;
// Hold information about GpuVideoDecoder configuration.
- GpuVideoDecoderInitParam init_param_;
+ media::VideoCodecConfig config_;
// Hold information about output surface format, etc.
GpuVideoDecoderInitDoneParam done_param_;
diff --git a/chrome/renderer/media/gles2_video_decode_context.cc b/chrome/renderer/media/gles2_video_decode_context.cc
index b7eec02..50180ef 100644
--- a/chrome/renderer/media/gles2_video_decode_context.cc
+++ b/chrome/renderer/media/gles2_video_decode_context.cc
@@ -5,8 +5,10 @@
#include "chrome/renderer/media/gles2_video_decode_context.h"
Gles2VideoDecodeContext::Gles2VideoDecodeContext(
- StorageType type, ggl::Context* context)
- : message_loop_(MessageLoop::current()), type_(type), context_(context) {
+ MessageLoop* message_loop, bool memory_mapped, ggl::Context* context)
+ : message_loop_(message_loop),
+ memory_mapped_(memory_mapped),
+ context_(context) {
}
Gles2VideoDecodeContext::~Gles2VideoDecodeContext() {
diff --git a/chrome/renderer/media/gles2_video_decode_context.h b/chrome/renderer/media/gles2_video_decode_context.h
index 4f556ab..3e56e73 100644
--- a/chrome/renderer/media/gles2_video_decode_context.h
+++ b/chrome/renderer/media/gles2_video_decode_context.h
@@ -24,38 +24,35 @@ class Context;
// commands specific to Chrome's renderer process to provide needed resources.
//
// There are two different kinds of video frame storage provided by this class:
-// 1. Memory mapped YUV textures (aka software decoding mode).
-// Each video frame allocated is backed by 3 luminance textures carrying
-// the Y, U and V planes.
+// 1. Memory mapped textures (aka software decoding mode).
+// Each texture is memory mapped and appears to the VideoDecodeEngine as
+// system memory.
//
-// Furthermore each texture is memory mapped and appears to the
-// VideoDecodeEngine as 3 planes backed by system memory.
-//
-// The usage of these 3 textures is that the VideoDecodeEngine is performing
+// The usage of the textures is that the VideoDecodeEngine is performing
// software video decoding and use them as if they are allocated in plain
// system memory (in fact they are allocated in system memory and shared
// with the GPU process). An additional step of uploading the content to
// video memory is needed. Since VideoDecodeEngine is unaware of the video
-// memory, this upload operation is performed by video renderer provided by
-// Chrome.
+// memory, this upload operation is performed by calling
+// UploadToVideoFrame().
//
// After the content is uploaded to video memory, WebKit will see the video
-// frame as 3 textures and will perform the necessary operations for
+// frame as textures and will perform the necessary operations for
// rendering.
//
-// 2. RGBA texture (aka hardware decoding mode).
-// In this mode of operation each video frame is backed by a RGBA texture.
-// This is used only when hardware video decoding is used. The texture needs
-// to be generated and allocated inside the renderer process first. This
-// will establish a translation between texture ID in the renderer process
-// and the GPU process.
+// 2. Opaque textures (aka hardware decoding mode).
+// In this mode of operation each video frame is backed by some opaque
+// textures. This is used only when hardware video decoding is used. The
+// textures need to be generated and allocated inside the renderer process
+// first. This will establish a translation between texture ID in the
+// renderer process and the GPU process.
//
// The texture ID generated is used by IpcVideoDecodeEngine only to be sent
// the GPU process. Inside the GPU process the texture ID is translated to
// a real texture ID inside the actual context. The real texture ID is then
// assigned to the hardware video decoder for storing the video frame.
//
-// WebKit will see the video frame as a normal RGBA texture and perform
+// WebKit will see the video frame as normal textures and perform
// necessary render operations.
//
// In both operation modes, the objective is to have WebKit see the video frames
@@ -63,36 +60,22 @@ class Context;
//
// THREAD SEMANTICS
//
-// This class is accessed on two threads, namely the Render Thread and the
-// Video Decoder Thread.
-//
-// GLES2 context and all OpenGL method calls should be accessed on the Render
-// Thread.
+// All methods of this class can be called on any thread. GLES2 context and all
+// OpenGL method calls are accessed on the Render Thread. As a result all Tasks
+// given to this object are executed on the Render Thread.
//
-// VideoDecodeContext implementations are accessed on the Video Decoder Thread.
+// Since this class is not refcounted, it is important to destroy objects of
+// this class only when the Task given to Destroy() is called.
//
class Gles2VideoDecodeContext : public media::VideoDecodeContext {
public:
- enum StorageType {
- // This video decode context provides YUV textures as storage. This is used
- // only in software decoding mode.
- kMemoryMappedYuvTextures,
-
- // This video decode context provides RBGA textures as storage. This is
- // used in hardware decoding mode.
- kRgbaTextures,
- };
-
- //--------------------------------------------------------------------------
- // Render Thread
- Gles2VideoDecodeContext(StorageType type, ggl::Context* context);
-
- // TODO(hclam): Need to figure out which thread destroys this object.
+ // |message_loop| is the message of the Render Thread.
+ // |memory_mapped| determines if textures allocated are memory mapped.
+ // |context| is the graphics context for generating textures.
+ Gles2VideoDecodeContext(MessageLoop* message_loop,
+ bool memory_mapped, ggl::Context* context);
virtual ~Gles2VideoDecodeContext();
- //--------------------------------------------------------------------------
- // Video Decoder Thread
-
// media::VideoDecodeContext implementation.
virtual void* GetDevice();
virtual void AllocateVideoFrames(
@@ -104,18 +87,15 @@ class Gles2VideoDecodeContext : public media::VideoDecodeContext {
Task* task);
virtual void Destroy(Task* task);
- //--------------------------------------------------------------------------
- // Any thread
// Accessor of the current mode of this decode context.
- bool IsMemoryMapped() const { return type_ == kMemoryMappedYuvTextures; }
+ bool IsMemoryMapped() const { return memory_mapped_; }
private:
- // Message loop that this object lives on. This is the message loop that
- // this object is created.
+ // Message loop for Render Thread.
MessageLoop* message_loop_;
// Type of storage provided by this class.
- StorageType type_;
+ bool memory_mapped_;
// Pointer to the GLES2 context.
ggl::Context* context_;
diff --git a/chrome/renderer/media/ipc_video_decoder.cc b/chrome/renderer/media/ipc_video_decoder.cc
index eda8696..4206380 100644
--- a/chrome/renderer/media/ipc_video_decoder.cc
+++ b/chrome/renderer/media/ipc_video_decoder.cc
@@ -1,6 +1,6 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved. Use of this
-// source code is governed by a BSD-style license that can be found in the
-// LICENSE file.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "chrome/renderer/media/ipc_video_decoder.h"
@@ -17,15 +17,13 @@
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/ffmpeg/ffmpeg_util.h"
#include "media/filters/ffmpeg_interfaces.h"
+#include "media/video/video_decode_engine.h"
IpcVideoDecoder::IpcVideoDecoder(MessageLoop* message_loop,
ggl::Context* ggl_context)
: width_(0),
height_(0),
- state_(kUnInitialized),
- pending_reads_(0),
- pending_requests_(0),
- renderer_thread_message_loop_(message_loop),
+ decode_engine_message_loop_(message_loop),
ggl_context_(ggl_context) {
}
@@ -34,30 +32,23 @@ IpcVideoDecoder::~IpcVideoDecoder() {
void IpcVideoDecoder::Initialize(media::DemuxerStream* demuxer_stream,
media::FilterCallback* callback) {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::Initialize,
- demuxer_stream,
- callback));
- return;
- }
+ // It doesn't matter which thread we perform initialization because
+ // all this method does is create objects and delegate the initialize
+ // message.
DCHECK(!demuxer_stream_);
demuxer_stream_ = demuxer_stream;
initialize_callback_.reset(callback);
- // We require bit stream converter for openmax hardware decoder.
- // TODO(hclam): This is a wrong place to initialize the demuxer stream's
- // bitstream converter.
+ // We require bit stream converter for hardware decoder.
demuxer_stream->EnableBitstreamConverter();
// Get the AVStream by querying for the provider interface.
media::AVStreamProvider* av_stream_provider;
if (!demuxer_stream->QueryInterface(&av_stream_provider)) {
- GpuVideoDecoderInitDoneParam param;
- OnInitializeDone(false, param);
+ host()->SetError(media::PIPELINE_ERROR_DECODE);
+ callback->Run();
+ delete callback;
return;
}
@@ -65,296 +56,141 @@ void IpcVideoDecoder::Initialize(media::DemuxerStream* demuxer_stream,
width_ = av_stream->codec->width;
height_ = av_stream->codec->height;
- // Switch GL context.
- bool ret = ggl::MakeCurrent(ggl_context_);
- DCHECK(ret) << "Failed to switch GL context";
+ // Create a video decode context that associates with the graphics
+ // context.
+ decode_context_.reset(ggl::CreateVideoDecodeContext(ggl_context_, true));
- // Generate textures to be used by the hardware video decoder in the GPU
- // process.
- // TODO(hclam): Allocation of textures should be done based on the request
- // of the GPU process.
- GLuint texture;
- glGenTextures(1, &texture);
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width_, height_, 0, GL_RGBA,
- GL_UNSIGNED_BYTE, NULL);
- texture_ = texture;
-
- // Create a hardware video decoder handle for IPC communication.
- gpu_video_decoder_host_ = ggl::CreateVideoDecoder(ggl_context_);
+ // Create a hardware video decoder handle.
+ decode_engine_.reset(ggl::CreateVideoDecodeEngine(ggl_context_));
// Initialize hardware decoder.
- GpuVideoDecoderInitParam param = {0};
+ media::VideoCodecConfig param;
+ memset(&param, 0, sizeof(param));
param.width = width_;
param.height = height_;
- if (!gpu_video_decoder_host_->Initialize(this, param)) {
- GpuVideoDecoderInitDoneParam param;
- OnInitializeDone(false, param);
- }
-}
-void IpcVideoDecoder::OnInitializeDone(
- bool success, const GpuVideoDecoderInitDoneParam& param) {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::OnInitializeDone,
- success,
- param));
- return;
- }
-
- media::AutoCallbackRunner done_runner(initialize_callback_.release());
-
- if (success) {
- media_format_.SetAsString(media::MediaFormat::kMimeType,
- media::mime_type::kUncompressedVideo);
- media_format_.SetAsInteger(media::MediaFormat::kWidth, width_);
- media_format_.SetAsInteger(media::MediaFormat::kHeight, height_);
- media_format_.SetAsInteger(
- media::MediaFormat::kSurfaceType,
- static_cast<int>(media::VideoFrame::TYPE_GL_TEXTURE));
- state_ = kPlaying;
- } else {
- LOG(ERROR) << "IpcVideoDecoder initialization failed!";
- host()->SetError(media::PIPELINE_ERROR_DECODE);
- }
+ // TODO(hclam): Move VideoDecodeEngine to IO Thread, this will avoid
+ // dead lock during teardown.
+ // VideoDecodeEngine will perform initialization on the message loop
+ // given to it so it doesn't matter on which thread we are calling this.
+ decode_engine_->Initialize(decode_engine_message_loop_, this,
+ decode_context_.get(), param);
}
void IpcVideoDecoder::Stop(media::FilterCallback* callback) {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::Stop,
- callback));
- return;
- }
-
stop_callback_.reset(callback);
- if (!gpu_video_decoder_host_->Uninitialize()) {
- LOG(ERROR) << "gpu video decoder destroy failed";
- IpcVideoDecoder::OnUninitializeDone();
- }
-}
-
-void IpcVideoDecoder::OnUninitializeDone() {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::OnUninitializeDone));
- return;
- }
-
- media::AutoCallbackRunner done_runner(stop_callback_.release());
-
- state_ = kStopped;
+ decode_engine_->Uninitialize();
}
void IpcVideoDecoder::Pause(media::FilterCallback* callback) {
- Flush(callback); // TODO(jiesun): move this to flush().
+ // TODO(hclam): It looks like that pause is not necessary so implement this
+ // later.
+ callback->Run();
+ delete callback;
}
void IpcVideoDecoder::Flush(media::FilterCallback* callback) {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::Flush,
- callback));
- return;
- }
-
- state_ = kFlushing;
-
flush_callback_.reset(callback);
-
- if (!gpu_video_decoder_host_->Flush()) {
- LOG(ERROR) << "gpu video decoder flush failed";
- OnFlushDone();
- }
-}
-
-void IpcVideoDecoder::OnFlushDone() {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::OnFlushDone));
- return;
- }
-
- if (pending_reads_ == 0 && pending_requests_ == 0 && flush_callback_.get()) {
- flush_callback_->Run();
- flush_callback_.reset();
- }
+ decode_engine_->Flush();
}
void IpcVideoDecoder::Seek(base::TimeDelta time,
media::FilterCallback* callback) {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::Seek,
- time,
- callback));
- return;
- }
-
- OnSeekComplete(callback);
+ seek_callback_.reset(callback);
+ decode_engine_->Seek();
}
-void IpcVideoDecoder::OnSeekComplete(media::FilterCallback* callback) {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::OnSeekComplete,
- callback));
- return;
- }
-
- media::AutoCallbackRunner done_runner(callback);
-
- state_ = kPlaying;
+void IpcVideoDecoder::OnInitializeComplete(const media::VideoCodecInfo& info) {
+ DCHECK_EQ(decode_engine_message_loop_, MessageLoop::current());
- for (int i = 0; i < 20; ++i) {
- demuxer_stream_->Read(
- NewCallback(this,
- &IpcVideoDecoder::OnReadComplete));
- ++pending_reads_;
+ if (info.success) {
+ media_format_.SetAsString(media::MediaFormat::kMimeType,
+ media::mime_type::kUncompressedVideo);
+ media_format_.SetAsInteger(media::MediaFormat::kWidth,
+ info.stream_info.surface_width);
+ media_format_.SetAsInteger(media::MediaFormat::kHeight,
+ info.stream_info.surface_height);
+ media_format_.SetAsInteger(
+ media::MediaFormat::kSurfaceType,
+ static_cast<int>(media::VideoFrame::TYPE_GL_TEXTURE));
+ } else {
+ LOG(ERROR) << "IpcVideoDecoder initialization failed!";
+ host()->SetError(media::PIPELINE_ERROR_DECODE);
}
-}
-void IpcVideoDecoder::OnReadComplete(media::Buffer* buffer) {
- scoped_refptr<media::Buffer> buffer_ref = buffer;
- ReadCompleteTask(buffer_ref);
+ initialize_callback_->Run();
+ initialize_callback_.reset();
}
-void IpcVideoDecoder::ReadCompleteTask(
- scoped_refptr<media::Buffer> buffer) {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::ReadCompleteTask,
- buffer));
- return;
- }
-
- DCHECK_GT(pending_reads_, 0u);
- --pending_reads_;
+void IpcVideoDecoder::OnUninitializeComplete() {
+ DCHECK_EQ(decode_engine_message_loop_, MessageLoop::current());
- if (state_ == kStopped || state_ == kEnded) {
- // Just discard the input buffers
- return;
- }
-
- if (state_ == kFlushing) {
- if (pending_reads_ == 0 && pending_requests_ == 0) {
- flush_callback_->Run();
- flush_callback_.reset();
- state_ = kPlaying;
- }
- return;
- }
- // Transition to kFlushCodec on the first end of input stream buffer.
- if (state_ == kPlaying && buffer->IsEndOfStream()) {
- state_ = kFlushCodec;
- }
+ // After the decode engine is uninitialized we are safe to destroy the decode
+ // context. The task will add a refcount to this object so we don't need to
+ // worry about object lifetimes.
+ decode_context_->Destroy(
+ NewRunnableMethod(this, &IpcVideoDecoder::OnDestroyComplete));
- gpu_video_decoder_host_->EmptyThisBuffer(buffer);
+ // We don't need to wait for destruction of decode context to complete because
+ // it can happen asynchronously. This object and decode context will live until
+ // the destruction task is called.
+ stop_callback_->Run();
+ stop_callback_.reset();
}
-void IpcVideoDecoder::ProduceVideoFrame(scoped_refptr<VideoFrame> video_frame) {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::ProduceVideoFrame,
- video_frame));
- return;
- }
-
- // Synchronized flushing before stop should prevent this.
- DCHECK_NE(state_, kStopped);
-
- // Notify decode engine the available of new frame.
- ++pending_requests_;
-
- VideoFrame::GlTexture textures[3] = { texture_, 0, 0 };
- scoped_refptr<VideoFrame> frame;
- media::VideoFrame::CreateFrameGlTexture(
- media::VideoFrame::RGBA, width_, height_, textures,
- base::TimeDelta(), base::TimeDelta(), &frame);
- gpu_video_decoder_host_->FillThisBuffer(frame);
+void IpcVideoDecoder::OnFlushComplete() {
+ DCHECK_EQ(decode_engine_message_loop_, MessageLoop::current());
+ flush_callback_->Run();
+ flush_callback_.reset();
}
-void IpcVideoDecoder::OnFillBufferDone(
- scoped_refptr<media::VideoFrame> video_frame) {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::OnFillBufferDone,
- video_frame));
- return;
- }
+void IpcVideoDecoder::OnSeekComplete() {
+ DCHECK_EQ(decode_engine_message_loop_, MessageLoop::current());
+ seek_callback_->Run();
+ seek_callback_.reset();
+}
- if (video_frame.get()) {
- --pending_requests_;
- VideoFrameReady(video_frame);
- if (state_ == kFlushing && pending_reads_ == 0 && pending_requests_ == 0) {
- CHECK(flush_callback_.get());
- flush_callback_->Run();
- flush_callback_.reset();
- state_ = kPlaying;
- }
+void IpcVideoDecoder::OnError() {
+ DCHECK_EQ(decode_engine_message_loop_, MessageLoop::current());
+ host()->SetError(media::PIPELINE_ERROR_DECODE);
+}
- } else {
- if (state_ == kFlushCodec) {
- // When in kFlushCodec, any errored decode, or a 0-lengthed frame,
- // is taken as a signal to stop decoding.
- state_ = kEnded;
- scoped_refptr<VideoFrame> video_frame;
- VideoFrame::CreateEmptyFrame(&video_frame);
- VideoFrameReady(video_frame);
- }
- }
+// This method is called by the demuxer after a demuxed packet is produced.
+void IpcVideoDecoder::OnReadComplete(media::Buffer* buffer) {
+ decode_engine_->ConsumeVideoSample(buffer);
}
-void IpcVideoDecoder::OnEmptyBufferDone(scoped_refptr<media::Buffer> buffer) {
- if (MessageLoop::current() != renderer_thread_message_loop_) {
- renderer_thread_message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &IpcVideoDecoder::OnEmptyBufferDone,
- buffer));
- return;
- }
+void IpcVideoDecoder::OnDestroyComplete() {
+ // We don't need to do anything in this method. Destruction of objects will
+ // occur as soon as refcount goes to 0.
+}
- // TODO(jiesun): We haven't recycle input buffer yet.
- demuxer_stream_->Read(NewCallback(this, &IpcVideoDecoder::OnReadComplete));
- ++pending_reads_;
+// This method is called by VideoRenderer. We delegate the method call to
+// VideoDecodeEngine.
+void IpcVideoDecoder::ProduceVideoFrame(
+ scoped_refptr<media::VideoFrame> video_frame) {
+ decode_engine_->ProduceVideoFrame(video_frame);
}
-void IpcVideoDecoder::OnDeviceError() {
- host()->SetError(media::PIPELINE_ERROR_DECODE);
+// This method is called by VideoDecodeEngine when a video frame is produced.
+// This is then passed to VideoRenderer.
+void IpcVideoDecoder::ConsumeVideoFrame(
+ scoped_refptr<media::VideoFrame> video_frame) {
+ DCHECK(video_frame);
+ VideoFrameReady(video_frame);
}
-bool IpcVideoDecoder::ProvidesBuffer() {
- return true;
+// This method is called by VideoDecodeEngine to request a video frame. The
+// request is passed to demuxer.
+void IpcVideoDecoder::ProduceVideoSample(scoped_refptr<media::Buffer> buffer) {
+ demuxer_stream_->Read(NewCallback(this, &IpcVideoDecoder::OnReadComplete));
}
// static
media::FilterFactory* IpcVideoDecoder::CreateFactory(
MessageLoop* message_loop, ggl::Context* ggl_context) {
- return new media::FilterFactoryImpl2<IpcVideoDecoder,
- MessageLoop*,
- ggl::Context*>(
- message_loop, ggl_context);
+ return new media::FilterFactoryImpl2<
+ IpcVideoDecoder, MessageLoop*, ggl::Context*>(message_loop, ggl_context);
}
// static
diff --git a/chrome/renderer/media/ipc_video_decoder.h b/chrome/renderer/media/ipc_video_decoder.h
index 0309ccd..d5ab685 100644
--- a/chrome/renderer/media/ipc_video_decoder.h
+++ b/chrome/renderer/media/ipc_video_decoder.h
@@ -6,10 +6,11 @@
#define CHROME_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
#include "base/time.h"
-#include "chrome/renderer/gpu_video_service_host.h"
#include "media/base/pts_heap.h"
#include "media/base/video_frame.h"
#include "media/filters/decoder_base.h"
+#include "media/video/video_decode_engine.h"
+#include "media/video/video_decode_context.h"
struct AVRational;
@@ -18,7 +19,7 @@ class Context;
} // namespace ggl
class IpcVideoDecoder : public media::VideoDecoder,
- public GpuVideoDecoderHost::EventHandler {
+ public media::VideoDecodeEngine::EventHandler {
public:
explicit IpcVideoDecoder(MessageLoop* message_loop,
ggl::Context* ggl_context);
@@ -40,51 +41,37 @@ class IpcVideoDecoder : public media::VideoDecoder,
virtual const media::MediaFormat& media_format() { return media_format_; }
virtual void ProduceVideoFrame(scoped_refptr<media::VideoFrame> video_frame);
- // GpuVideoDecoderHost::EventHandler.
- virtual void OnInitializeDone(bool success,
- const GpuVideoDecoderInitDoneParam& param);
- virtual void OnUninitializeDone();
- virtual void OnFlushDone();
- virtual void OnEmptyBufferDone(scoped_refptr<media::Buffer> buffer);
- virtual void OnFillBufferDone(scoped_refptr<media::VideoFrame> frame);
- virtual void OnDeviceError();
- virtual bool ProvidesBuffer();
+ // TODO(hclam): Remove this method.
+ virtual bool ProvidesBuffer() { return true; }
+
+ // VideoDecodeEngine::EventHandler implementation.
+ virtual void OnInitializeComplete(const media::VideoCodecInfo& info);
+ virtual void OnUninitializeComplete();
+ virtual void OnFlushComplete();
+ virtual void OnSeekComplete();
+ virtual void OnError();
+ virtual void OnFormatChange(media::VideoStreamInfo stream_info);
+ virtual void ProduceVideoSample(scoped_refptr<media::Buffer> buffer);
+ virtual void ConsumeVideoFrame(scoped_refptr<media::VideoFrame> frame);
private:
- enum DecoderState {
- kUnInitialized,
- kPlaying,
- kFlushing,
- kPausing,
- kFlushCodec,
- kEnded,
- kStopped,
- };
-
- void OnSeekComplete(media::FilterCallback* callback);
void OnReadComplete(media::Buffer* buffer);
- void ReadCompleteTask(scoped_refptr<media::Buffer> buffer);
+ void OnDestroyComplete();
int32 width_;
int32 height_;
media::MediaFormat media_format_;
scoped_ptr<media::FilterCallback> flush_callback_;
+ scoped_ptr<media::FilterCallback> seek_callback_;
scoped_ptr<media::FilterCallback> initialize_callback_;
scoped_ptr<media::FilterCallback> stop_callback_;
- DecoderState state_;
-
- // Tracks the number of asynchronous reads issued to |demuxer_stream_|.
- // Using size_t since it is always compared against deque::size().
- size_t pending_reads_;
- // Tracks the number of asynchronous reads issued from renderer.
- size_t pending_requests_;
-
// Pointer to the demuxer stream that will feed us compressed buffers.
scoped_refptr<media::DemuxerStream> demuxer_stream_;
- MessageLoop* renderer_thread_message_loop_;
+ // This is the message loop that we should assign to VideoDecodeEngine.
+ MessageLoop* decode_engine_message_loop_;
// A context for allocating textures and issuing GLES2 commands.
// TODO(hclam): A ggl::Context lives on the Render Thread while this object
@@ -92,14 +79,14 @@ class IpcVideoDecoder : public media::VideoDecoder,
// and destruction of the context.
ggl::Context* ggl_context_;
- // Handle to the hardware video decoder. This object will use IPC to
- // communicate with the decoder in the GPU process.
- scoped_refptr<GpuVideoDecoderHost> gpu_video_decoder_host_;
+ // This VideoDecodeEngine translates our requests into IPC commands sent to
+ // the GPU process.
+ // VideoDecodeEngine should run on the IO Thread instead of the Render
+ // Thread to avoid deadlock during teardown of the media pipeline.
+ scoped_ptr<media::VideoDecodeEngine> decode_engine_;
- // Texture that contains the video frame.
- // TODO(hclam): Instead of one texture, we should have a set of textures
- // as requested by the hardware video decode engine in the GPU process.
- unsigned int texture_;
+ // Decoding context to be used by VideoDecodeEngine.
+ scoped_ptr<media::VideoDecodeContext> decode_context_;
DISALLOW_COPY_AND_ASSIGN(IpcVideoDecoder);
};