diff options
author | mlloyd@chromium.org <mlloyd@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-09-17 14:19:15 +0000 |
---|---|---|
committer | mlloyd@chromium.org <mlloyd@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-09-17 14:19:15 +0000 |
commit | dc95389fc803581de77c7fb617b1ef4870c9cbd5 (patch) | |
tree | ec919d1254ff839c690af9e8327e362bff0a9bac /chrome/gpu | |
parent | 425bbb99f4faa4bdf90d9bca09174baa467aeaae (diff) | |
download | chromium_src-dc95389fc803581de77c7fb617b1ef4870c9cbd5.zip chromium_src-dc95389fc803581de77c7fb617b1ef4870c9cbd5.tar.gz chromium_src-dc95389fc803581de77c7fb617b1ef4870c9cbd5.tar.bz2 |
Revert 59784 - GpuVideoDecoder to use GpuVideoDevice and IPC messages to complete VideoFrame allocation
GpuVideoDecoder now sends IPC messages to allocate GL textures. It also uses
GpuVideoDevice to create VideoFrames from the GL textures. These GL textures
are passed into VideoDecodeEngine.
BUG=53714
TEST=Tree is green
Review URL: http://codereview.chromium.org/3335019
TBR=hclam@chromium.org
Review URL: http://codereview.chromium.org/3451007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@59790 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome/gpu')
-rw-r--r-- | chrome/gpu/gpu_video_decoder.cc | 183 | ||||
-rw-r--r-- | chrome/gpu/gpu_video_decoder.h | 83 |
2 files changed, 135 insertions, 131 deletions
diff --git a/chrome/gpu/gpu_video_decoder.cc b/chrome/gpu/gpu_video_decoder.cc index eb1823a..bcdce82 100644 --- a/chrome/gpu/gpu_video_decoder.cc +++ b/chrome/gpu/gpu_video_decoder.cc @@ -53,17 +53,63 @@ bool GpuVideoDecoder::CreateInputTransferBuffer( return true; } +bool GpuVideoDecoder::CreateOutputTransferBuffer( + uint32 size, + base::SharedMemoryHandle* handle) { + output_transfer_buffer_.reset(new base::SharedMemory); + if (!output_transfer_buffer_.get()) + return false; + + if (!output_transfer_buffer_->Create(std::wstring(), false, false, size)) + return false; + + if (!output_transfer_buffer_->Map(size)) + return false; + + if (!output_transfer_buffer_->ShareToProcess(renderer_handle_, handle)) + return false; + + return true; +} + +void GpuVideoDecoder::CreateVideoFrameOnTransferBuffer() { + const base::TimeDelta kZero; + uint8* data[media::VideoFrame::kMaxPlanes]; + int32 strides[media::VideoFrame::kMaxPlanes]; + memset(data, 0, sizeof(data)); + memset(strides, 0, sizeof(strides)); + data[0] = static_cast<uint8*>(output_transfer_buffer_->memory()); + data[1] = data[0] + config_.width * config_.height; + data[2] = data[1] + config_.width * config_.height / 4; + strides[0] = config_.width; + strides[1] = strides[2] = config_.width >> 1; + media::VideoFrame:: CreateFrameExternal( + media::VideoFrame::TYPE_SYSTEM_MEMORY, + media::VideoFrame::YV12, + config_.width, config_.height, 3, + data, strides, + kZero, kZero, + NULL, + &frame_); +} + void GpuVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) { info_ = info; GpuVideoDecoderInitDoneParam param; param.success = false; param.input_buffer_handle = base::SharedMemory::NULLHandle(); + param.output_buffer_handle = base::SharedMemory::NULLHandle(); if (!info.success) { SendInitializeDone(param); return; } + // Translate surface type. + // TODO(hclam): Remove |surface_type| since we are always passing textures. 
+ param.surface_type = static_cast<int>(info.stream_info.surface_type); + param.format = info.stream_info.surface_format; + // TODO(jiesun): Check the assumption of input size < original size. param.input_buffer_size = config_.width * config_.height * 3 / 2; if (!CreateInputTransferBuffer(param.input_buffer_size, @@ -72,7 +118,31 @@ void GpuVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) { return; } + if (info.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) { + // TODO(jiesun): Allocate this according to the surface format. + // The format actually could change during streaming, we need to + // notify GpuVideoDecoderHost side when this happened and renegotiate + // the transfer buffer. + switch (info.stream_info.surface_format) { + case VideoFrame::YV12: + // TODO(jiesun): take stride into account. + param.output_buffer_size = + config_.width * config_.height * 3 / 2; + break; + default: + NOTREACHED(); + } + + if (!CreateOutputTransferBuffer(param.output_buffer_size, + ¶m.output_buffer_handle)) { + SendInitializeDone(param); + return; + } + CreateVideoFrameOnTransferBuffer(); + } + param.success = true; + SendInitializeDone(param); } @@ -106,16 +176,21 @@ void GpuVideoDecoder::ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) { output_param.duration = frame->GetDuration().InMicroseconds(); output_param.flags = frame->IsEndOfStream() ? GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream : 0; + // TODO(hclam): We should have the conversion between VideoFrame and the + // IPC transport param done in GpuVideoDevice. + // This is a hack to pass texture back as a param. + output_param.texture = frame->gl_texture(media::VideoFrame::kRGBPlane); SendFillBufferDone(output_param); } void* GpuVideoDecoder::GetDevice() { - return video_device_->GetDevice(); + // Simply delegate the method call to GpuVideoDevice. 
+ return decode_context_->GetDevice(); } void GpuVideoDecoder::AllocateVideoFrames( - int n, size_t width, size_t height, media::VideoFrame::Format format, - std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task) { + int n, size_t width, size_t height, + AllocationCompleteCallback* callback) { // Since the communication between Renderer and GPU process is by GL textures. // We need to obtain a set of GL textures by sending IPC commands to the // Renderer process. The recipient of these commands will be IpcVideoDecoder. @@ -133,37 +208,20 @@ void GpuVideoDecoder::AllocateVideoFrames( // // Note that this method is called when there's no video frames allocated or // they were all released. - DCHECK(video_frame_map_.empty()); - - // Save the parameters for allocation. - pending_allocation_.reset(new PendingAllocation()); - pending_allocation_->n = n; - pending_allocation_->width = width; - pending_allocation_->height = height; - pending_allocation_->format = format; - pending_allocation_->frames = frames; - pending_allocation_->task = task; - SendAllocateVideoFrames(n, width, height, format); } -void GpuVideoDecoder::ReleaseAllVideoFrames() { +void GpuVideoDecoder::ReleaseVideoFrames(int n, VideoFrame* frames) { // This method will first call to GpuVideoDevice to release all the resource // associated with a VideoFrame. // - // And then we'll call GpuVideoDevice::ReleaseVideoFrame() to remove the set + // And when we'll call GpuVideoDevice::ReleaseVideoFrames to remove the set // of Gl textures associated with the context. // // And finally we'll send IPC commands to IpcVideoDecoder to destroy all // GL textures generated. 
- for (VideoFrameMap::iterator i = video_frame_map_.begin(); - i != video_frame_map_.end(); ++i) { - video_device_->ReleaseVideoFrame(i->second); - } - video_frame_map_.clear(); - SendReleaseAllVideoFrames(); } -void GpuVideoDecoder::Destroy(Task* task) { +void GpuVideoDecoder::Destroy(DestructionCompleteCallback* callback) { // TODO(hclam): I still need to think what I should do here. } @@ -173,6 +231,7 @@ GpuVideoDecoder::GpuVideoDecoder( base::ProcessHandle handle, gpu::gles2::GLES2Decoder* decoder) : decoder_host_route_id_(param->decoder_host_route_id), + output_transfer_buffer_busy_(false), pending_output_requests_(0), channel_(channel), renderer_handle_(handle), @@ -201,6 +260,8 @@ void GpuVideoDecoder::OnUninitialize() { } void GpuVideoDecoder::OnFlush() { + // TODO(jiesun): this is wrong?? + output_transfer_buffer_busy_ = false; pending_output_requests_ = 0; decode_engine_->Flush(); @@ -231,56 +292,42 @@ void GpuVideoDecoder::OnFillThisBuffer( if (info_.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) { pending_output_requests_++; + if (!output_transfer_buffer_busy_) { + output_transfer_buffer_busy_ = true; + decode_engine_->ProduceVideoFrame(frame_); + } } else { + // TODO(hclam): I need to rethink how to delegate calls to + // VideoDecodeEngine, I may need to create a GpuVideoDecodeContext that + // provides a method for me to make calls to VideoDecodeEngine with the + // correct VideoFrame. 
+ DCHECK_EQ(VideoFrame::TYPE_GL_TEXTURE, info_.stream_info.surface_type); + + scoped_refptr<media::VideoFrame> frame; + VideoFrame::GlTexture textures[3] = { param.texture, 0, 0 }; + + media::VideoFrame:: CreateFrameGlTexture( + media::VideoFrame::RGBA, config_.width, config_.height, textures, + base::TimeDelta(), base::TimeDelta(), &frame); + decode_engine_->ProduceVideoFrame(frame); } } void GpuVideoDecoder::OnFillThisBufferDoneACK() { if (info_.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) { + output_transfer_buffer_busy_ = false; pending_output_requests_--; if (pending_output_requests_) { + output_transfer_buffer_busy_ = true; decode_engine_->ProduceVideoFrame(frame_); } } } -void GpuVideoDecoder::OnVideoFrameAllocated(int32 frame_id, - std::vector<uint32> textures) { - // This method is called in response to a video frame allocation request sent - // to the Renderer process. - // We should use the textures to generate a VideoFrame by using - // GpuVideoDevice. The VideoFrame created is added to the internal map. - // If we have generated enough VideoFrame, we call |allocation_callack_| to - // complete the allocation process. - media::VideoFrame::GlTexture gl_textures[media::VideoFrame::kMaxPlanes]; - memset(gl_textures, 0, sizeof(gl_textures)); - for (size_t i = 0; i < textures.size(); ++i) { - // Translate the client texture id to service texture id. 
- bool ret = gles2_decoder_->GetServiceTextureId(textures[i], - gl_textures + i); - DCHECK(ret) << "Cannot translate client texture ID to service ID"; - } - - scoped_refptr<media::VideoFrame> frame; - bool ret = video_device_->CreateVideoFrameFromGlTextures( - pending_allocation_->width, pending_allocation_->height, - pending_allocation_->format, gl_textures, &frame); - - DCHECK(ret) << "Failed to allocation VideoFrame from GL textures)"; - pending_allocation_->frames->push_back(frame); - video_frame_map_.insert(std::make_pair(frame_id, frame)); - - if (video_frame_map_.size() == pending_allocation_->n) { - pending_allocation_->task->Run(); - delete pending_allocation_->task; - pending_allocation_.reset(); - } -} - void GpuVideoDecoder::SendInitializeDone( const GpuVideoDecoderInitDoneParam& param) { if (!channel_->Send( - new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) { + new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) { LOG(ERROR) << "GpuVideoDecoderMsg_InitializeACK failed"; } } @@ -299,14 +346,14 @@ void GpuVideoDecoder::SendFlushDone() { void GpuVideoDecoder::SendEmptyBufferDone() { if (!channel_->Send( - new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) { + new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) { LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferDone failed"; } } void GpuVideoDecoder::SendEmptyBufferACK() { if (!channel_->Send( - new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) { + new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) { LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferACK failed"; } } @@ -314,23 +361,7 @@ void GpuVideoDecoder::SendEmptyBufferACK() { void GpuVideoDecoder::SendFillBufferDone( const GpuVideoDecoderOutputBufferParam& param) { if (!channel_->Send( - new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), param))) { + new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), param))) { LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBufferDone failed"; } } - 
-void GpuVideoDecoder::SendAllocateVideoFrames( - int n, size_t width, size_t height, media::VideoFrame::Format format) { - if (!channel_->Send( - new GpuVideoDecoderHostMsg_AllocateVideoFrames( - route_id(), n, width, height, format))) { - LOG(ERROR) << "GpuVideoDecoderMsg_AllocateVideoFrames failed"; - } -} - -void GpuVideoDecoder::SendReleaseAllVideoFrames() { - if (!channel_->Send( - new GpuVideoDecoderHostMsg_ReleaseAllVideoFrames(route_id()))) { - LOG(ERROR) << "GpuVideoDecoderMsg_ReleaseAllVideoFrames failed"; - } -} diff --git a/chrome/gpu/gpu_video_decoder.h b/chrome/gpu/gpu_video_decoder.h index 9f77700..7aabffa 100644 --- a/chrome/gpu/gpu_video_decoder.h +++ b/chrome/gpu/gpu_video_decoder.h @@ -5,9 +5,6 @@ #ifndef CHROME_GPU_GPU_VIDEO_DECODER_H_ #define CHROME_GPU_GPU_VIDEO_DECODER_H_ -#include <map> -#include <vector> - #include "base/basictypes.h" #include "base/callback.h" #include "base/ref_counted.h" @@ -78,8 +75,8 @@ class GpuChannel; // will maintain such mapping. // class GpuVideoDecoder - : public base::RefCountedThreadSafe<GpuVideoDecoder>, - public IPC::Channel::Listener, + : public IPC::Channel::Listener, + public base::RefCountedThreadSafe<GpuVideoDecoder>, public media::VideoDecodeEngine::EventHandler, public media::VideoDecodeContext { @@ -101,11 +98,10 @@ class GpuVideoDecoder // VideoDecodeContext implementation. virtual void* GetDevice(); - virtual void AllocateVideoFrames( - int n, size_t width, size_t height, media::VideoFrame::Format format, - std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task); - virtual void ReleaseAllVideoFrames(); - virtual void Destroy(Task* task); + virtual void AllocateVideoFrames(int n, size_t width, size_t height, + AllocationCompleteCallback* callback); + virtual void ReleaseVideoFrames(int n, VideoFrame* frames); + virtual void Destroy(DestructionCompleteCallback* callback); // Constructor and destructor. 
GpuVideoDecoder(const GpuVideoDecoderInfoParam* param, @@ -115,45 +111,19 @@ class GpuVideoDecoder virtual ~GpuVideoDecoder() {} private: - struct PendingAllocation { - size_t n; - size_t width; - size_t height; - media::VideoFrame::Format format; - std::vector<scoped_refptr<media::VideoFrame> >* frames; - Task* task; - }; - int32 route_id() { return decoder_host_route_id_; } bool CreateInputTransferBuffer(uint32 size, base::SharedMemoryHandle* handle); - - // These methods are message handlers for the messages sent from the Renderer - // process. - void OnInitialize(const GpuVideoDecoderInitParam& param); - void OnUninitialize(); - void OnFlush(); - void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer); - void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& param); - void OnFillThisBufferDoneACK(); - void OnVideoFrameAllocated(int32 frame_id, std::vector<uint32> textures); - - // Helper methods for sending messages to the Renderer process. - void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param); - void SendUninitializeDone(); - void SendFlushDone(); - void SendEmptyBufferDone(); - void SendEmptyBufferACK(); - void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& param); - void SendAllocateVideoFrames( - int n, size_t width, size_t height, media::VideoFrame::Format format); - void SendReleaseAllVideoFrames(); + bool CreateOutputTransferBuffer(uint32 size, + base::SharedMemoryHandle* handle); + void CreateVideoFrameOnTransferBuffer(); int32 decoder_host_route_id_; // Used only in system memory path. i.e. Remove this later. scoped_refptr<VideoFrame> frame_; + bool output_transfer_buffer_busy_; int32 pending_output_requests_; GpuChannel* channel_; @@ -163,27 +133,30 @@ class GpuVideoDecoder // is used to switch context and translate client texture ID to service ID. gpu::gles2::GLES2Decoder* gles2_decoder_; - // Memory for transfering the input data for the hardware video decoder. 
scoped_ptr<base::SharedMemory> input_transfer_buffer_; + scoped_ptr<base::SharedMemory> output_transfer_buffer_; - // VideoDecodeEngine is used to do the actual video decoding. scoped_ptr<media::VideoDecodeEngine> decode_engine_; - - // GpuVideoDevice is used to generate VideoFrame(s) from GL textures. The - // frames generated are understood by the decode engine. - scoped_ptr<GpuVideoDevice> video_device_; - - // Contain information for allocation VideoFrame(s). - scoped_ptr<PendingAllocation> pending_allocation_; - - // Contains the mapping between a |frame_id| and VideoFrame generated by - // GpuVideoDevice from the associated GL textures. - typedef std::map<int32, scoped_refptr<media::VideoFrame> > VideoFrameMap; - VideoFrameMap video_frame_map_; - + scoped_ptr<GpuVideoDevice> decode_context_; media::VideoCodecConfig config_; media::VideoCodecInfo info_; + // Input message handler. + void OnInitialize(const GpuVideoDecoderInitParam& param); + void OnUninitialize(); + void OnFlush(); + void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer); + void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& param); + void OnFillThisBufferDoneACK(); + + // Output message helper. + void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param); + void SendUninitializeDone(); + void SendFlushDone(); + void SendEmptyBufferDone(); + void SendEmptyBufferACK(); + void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& param); + DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoder); }; |