author | hclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-09-17 22:03:16 +0000
---|---|---
committer | hclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-09-17 22:03:16 +0000
commit | 1318e92f70e240d7ae71320ea7e4fcae18f2ce3e (patch) |
tree | 412ee9192a850d9867bea3a841d800f67081ebe3 /chrome/gpu |
parent | 9fcd39385ae39a68d3509238bd9ef83af1868fc7 (diff) |
Resubmit GpuVideoDecoder and related patches.
BUG=53714
TEST=Tree is green
Review URL: http://codereview.chromium.org/3442006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@59860 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome/gpu')
-rw-r--r-- | chrome/gpu/gpu_video_decoder.cc | 211
-rw-r--r-- | chrome/gpu/gpu_video_decoder.h | 104
-rw-r--r-- | chrome/gpu/media/fake_gl_video_decode_engine.cc | 50
-rw-r--r-- | chrome/gpu/media/fake_gl_video_decode_engine.h | 20
-rw-r--r-- | chrome/gpu/media/fake_gl_video_device.cc | 60
-rw-r--r-- | chrome/gpu/media/fake_gl_video_device.h | 27
-rw-r--r-- | chrome/gpu/media/gpu_video_device.h | 17
7 files changed, 330 insertions, 159 deletions
diff --git a/chrome/gpu/gpu_video_decoder.cc b/chrome/gpu/gpu_video_decoder.cc
index bcdce82..bf5881ed 100644
--- a/chrome/gpu/gpu_video_decoder.cc
+++ b/chrome/gpu/gpu_video_decoder.cc
@@ -4,9 +4,11 @@
 
 #include "chrome/gpu/gpu_video_decoder.h"
 
+#include "chrome/common/child_thread.h"
 #include "chrome/common/gpu_messages.h"
 #include "chrome/gpu/gpu_channel.h"
 #include "chrome/gpu/media/fake_gl_video_decode_engine.h"
+#include "chrome/gpu/media/fake_gl_video_device.h"
 #include "media/base/data_buffer.h"
 #include "media/base/video_frame.h"
 
@@ -53,63 +55,17 @@ bool GpuVideoDecoder::CreateInputTransferBuffer(
   return true;
 }
 
-bool GpuVideoDecoder::CreateOutputTransferBuffer(
-    uint32 size,
-    base::SharedMemoryHandle* handle) {
-  output_transfer_buffer_.reset(new base::SharedMemory);
-  if (!output_transfer_buffer_.get())
-    return false;
-
-  if (!output_transfer_buffer_->Create(std::wstring(), false, false, size))
-    return false;
-
-  if (!output_transfer_buffer_->Map(size))
-    return false;
-
-  if (!output_transfer_buffer_->ShareToProcess(renderer_handle_, handle))
-    return false;
-
-  return true;
-}
-
-void GpuVideoDecoder::CreateVideoFrameOnTransferBuffer() {
-  const base::TimeDelta kZero;
-  uint8* data[media::VideoFrame::kMaxPlanes];
-  int32 strides[media::VideoFrame::kMaxPlanes];
-  memset(data, 0, sizeof(data));
-  memset(strides, 0, sizeof(strides));
-  data[0] = static_cast<uint8*>(output_transfer_buffer_->memory());
-  data[1] = data[0] + config_.width * config_.height;
-  data[2] = data[1] + config_.width * config_.height / 4;
-  strides[0] = config_.width;
-  strides[1] = strides[2] = config_.width >> 1;
-  media::VideoFrame::CreateFrameExternal(
-      media::VideoFrame::TYPE_SYSTEM_MEMORY,
-      media::VideoFrame::YV12,
-      config_.width, config_.height, 3,
-      data, strides,
-      kZero, kZero,
-      NULL,
-      &frame_);
-}
-
 void GpuVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
   info_ = info;
   GpuVideoDecoderInitDoneParam param;
   param.success = false;
   param.input_buffer_handle = base::SharedMemory::NULLHandle();
-  param.output_buffer_handle = base::SharedMemory::NULLHandle();
 
   if (!info.success) {
     SendInitializeDone(param);
     return;
   }
 
-  // Translate surface type.
-  // TODO(hclam): Remove |surface_type| since we are always passing textures.
-  param.surface_type = static_cast<int>(info.stream_info.surface_type);
-  param.format = info.stream_info.surface_format;
-
   // TODO(jiesun): Check the assumption of input size < original size.
   param.input_buffer_size = config_.width * config_.height * 3 / 2;
   if (!CreateInputTransferBuffer(param.input_buffer_size,
@@ -118,31 +74,7 @@ void GpuVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
     return;
   }
 
-  if (info.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) {
-    // TODO(jiesun): Allocate this according to the surface format.
-    // The format actually could change during streaming, we need to
-    // notify GpuVideoDecoderHost side when this happened and renegotiate
-    // the transfer buffer.
-    switch (info.stream_info.surface_format) {
-      case VideoFrame::YV12:
-        // TODO(jiesun): take stride into account.
-        param.output_buffer_size =
-            config_.width * config_.height * 3 / 2;
-        break;
-      default:
-        NOTREACHED();
-    }
-
-    if (!CreateOutputTransferBuffer(param.output_buffer_size,
-                                    &param.output_buffer_handle)) {
-      SendInitializeDone(param);
-      return;
-    }
-    CreateVideoFrameOnTransferBuffer();
-  }
-
   param.success = true;
-
   SendInitializeDone(param);
 }
 
@@ -176,21 +108,20 @@ void GpuVideoDecoder::ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
   output_param.duration = frame->GetDuration().InMicroseconds();
   output_param.flags = frame->IsEndOfStream() ?
       GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream : 0;
-  // TODO(hclam): We should have the conversion between VideoFrame and the
-  // IPC transport param done in GpuVideoDevice.
-  // This is a hack to pass texture back as a param.
-  output_param.texture = frame->gl_texture(media::VideoFrame::kRGBPlane);
   SendFillBufferDone(output_param);
 }
 
 void* GpuVideoDecoder::GetDevice() {
+  bool ret = gles2_decoder_->MakeCurrent();
+  DCHECK(ret) << "Failed to switch context";
+
   // Simply delegate the method call to GpuVideoDevice.
-  return decode_context_->GetDevice();
+  return video_device_->GetDevice();
 }
 
 void GpuVideoDecoder::AllocateVideoFrames(
-    int n, size_t width, size_t height,
-    AllocationCompleteCallback* callback) {
+    int n, size_t width, size_t height, media::VideoFrame::Format format,
+    std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task) {
   // Since the communication between Renderer and GPU process is by GL textures.
   // We need to obtain a set of GL textures by sending IPC commands to the
   // Renderer process. The recipient of these commands will be IpcVideoDecoder.
@@ -208,20 +139,56 @@ void GpuVideoDecoder::AllocateVideoFrames(
   //
   // Note that this method is called when there's no video frames allocated or
   // they were all released.
+  DCHECK(video_frame_map_.empty());
+
+  // Save the parameters for allocation.
+  pending_allocation_.reset(new PendingAllocation());
+  pending_allocation_->n = n;
+  pending_allocation_->width = width;
+  pending_allocation_->height = height;
+  pending_allocation_->format = format;
+  pending_allocation_->frames = frames;
+  pending_allocation_->task = task;
+  SendAllocateVideoFrames(n, width, height, format);
 }
 
-void GpuVideoDecoder::ReleaseVideoFrames(int n, VideoFrame* frames) {
+void GpuVideoDecoder::ReleaseAllVideoFrames() {
   // This method will first call to GpuVideoDevice to release all the resource
   // associated with a VideoFrame.
   //
-  // And when we'll call GpuVideoDevice::ReleaseVideoFrames to remove the set
+  // And then we'll call GpuVideoDevice::ReleaseVideoFrame() to remove the set
   // of Gl textures associated with the context.
   //
   // And finally we'll send IPC commands to IpcVideoDecoder to destroy all
   // GL textures generated.
+  bool ret = gles2_decoder_->MakeCurrent();
+  DCHECK(ret) << "Failed to switch context";
+
+  for (VideoFrameMap::iterator i = video_frame_map_.begin();
+       i != video_frame_map_.end(); ++i) {
+    video_device_->ReleaseVideoFrame(i->second);
+  }
+  video_frame_map_.clear();
+  SendReleaseAllVideoFrames();
 }
 
-void GpuVideoDecoder::Destroy(DestructionCompleteCallback* callback) {
+void GpuVideoDecoder::UploadToVideoFrame(void* buffer,
+                                         scoped_refptr<media::VideoFrame> frame,
+                                         Task* task) {
+  // This method is called by VideoDecodeEngine to upload a buffer to a
+  // VideoFrame. We should just delegate this to GpuVideoDevice which contains
+  // the actual implementation.
+  bool ret = gles2_decoder_->MakeCurrent();
+  DCHECK(ret) << "Failed to switch context";
+
+  // Actually doing the upload on the main thread.
+  ret = video_device_->UploadToVideoFrame(buffer, frame);
+  DCHECK(ret) << "Failed to upload video content to a VideoFrame.";
+  task->Run();
+  delete task;
+}
+
+void GpuVideoDecoder::Destroy(Task* task) {
   // TODO(hclam): I still need to think what I should do here.
 }
 
@@ -231,7 +198,6 @@ GpuVideoDecoder::GpuVideoDecoder(
     base::ProcessHandle handle,
     gpu::gles2::GLES2Decoder* decoder)
     : decoder_host_route_id_(param->decoder_host_route_id),
-      output_transfer_buffer_busy_(false),
      pending_output_requests_(0),
      channel_(channel),
      renderer_handle_(handle),
@@ -242,17 +208,17 @@ GpuVideoDecoder::GpuVideoDecoder(
   // TODO(jiesun): find a better way to determine which VideoDecodeEngine
   // to return on current platform.
   decode_engine_.reset(new FakeGlVideoDecodeEngine());
+  video_device_.reset(new FakeGlVideoDevice());
 }
 
 void GpuVideoDecoder::OnInitialize(const GpuVideoDecoderInitParam& param) {
   // TODO(hclam): Initialize the VideoDecodeContext first.
-
   // TODO(jiesun): codec id should come from |param|.
   config_.codec = media::kCodecH264;
   config_.width = param.width;
   config_.height = param.height;
   config_.opaque_context = NULL;
-  decode_engine_->Initialize(NULL, this, config_);
+  decode_engine_->Initialize(NULL, this, this, config_);
 }
 
 void GpuVideoDecoder::OnUninitialize() {
@@ -260,8 +226,6 @@ void GpuVideoDecoder::OnUninitialize() {
 }
 
 void GpuVideoDecoder::OnFlush() {
-  // TODO(jiesun): this is wrong??
-  output_transfer_buffer_busy_ = false;
   pending_output_requests_ = 0;
 
   decode_engine_->Flush();
@@ -285,49 +249,60 @@ void GpuVideoDecoder::OnEmptyThisBuffer(
 void GpuVideoDecoder::OnFillThisBuffer(
     const GpuVideoDecoderOutputBufferParam& param) {
   // Switch context before calling to the decode engine.
-  // TODO(hclam): This is temporary to allow FakeGlVideoDecodeEngine to issue
-  // GL commands correctly.
   bool ret = gles2_decoder_->MakeCurrent();
   DCHECK(ret) << "Failed to switch context";
 
   if (info_.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) {
     pending_output_requests_++;
-    if (!output_transfer_buffer_busy_) {
-      output_transfer_buffer_busy_ = true;
-      decode_engine_->ProduceVideoFrame(frame_);
-    }
   } else {
-    // TODO(hclam): I need to rethink how to delegate calls to
-    // VideoDecodeEngine, I may need to create a GpuVideoDecodeContext that
-    // provides a method for me to make calls to VideoDecodeEngine with the
-    // correct VideoFrame.
-    DCHECK_EQ(VideoFrame::TYPE_GL_TEXTURE, info_.stream_info.surface_type);
-
-    scoped_refptr<media::VideoFrame> frame;
-    VideoFrame::GlTexture textures[3] = { param.texture, 0, 0 };
-
-    media::VideoFrame::CreateFrameGlTexture(
-        media::VideoFrame::RGBA, config_.width, config_.height, textures,
-        base::TimeDelta(), base::TimeDelta(), &frame);
-    decode_engine_->ProduceVideoFrame(frame);
   }
 }
 
 void GpuVideoDecoder::OnFillThisBufferDoneACK() {
   if (info_.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) {
-    output_transfer_buffer_busy_ = false;
    pending_output_requests_--;
    if (pending_output_requests_) {
-      output_transfer_buffer_busy_ = true;
      decode_engine_->ProduceVideoFrame(frame_);
    }
  }
 }
 
+void GpuVideoDecoder::OnVideoFrameAllocated(int32 frame_id,
+                                            std::vector<uint32> textures) {
+  // This method is called in response to a video frame allocation request sent
+  // to the Renderer process.
+  // We should use the textures to generate a VideoFrame by using
+  // GpuVideoDevice. The VideoFrame created is added to the internal map.
+  // If we have generated enough VideoFrame, we call |allocation_callack_| to
+  // complete the allocation process.
+  for (size_t i = 0; i < textures.size(); ++i) {
+    media::VideoFrame::GlTexture gl_texture;
+    // Translate the client texture id to service texture id.
+    bool ret = gles2_decoder_->GetServiceTextureId(textures[i], &gl_texture);
+    DCHECK(ret) << "Cannot translate client texture ID to service ID";
+    textures[i] = gl_texture;
+  }
+
+  scoped_refptr<media::VideoFrame> frame;
+  bool ret = video_device_->CreateVideoFrameFromGlTextures(
+      pending_allocation_->width, pending_allocation_->height,
+      pending_allocation_->format, textures, &frame);
+
+  DCHECK(ret) << "Failed to allocation VideoFrame from GL textures)";
+  pending_allocation_->frames->push_back(frame);
+  video_frame_map_.insert(std::make_pair(frame_id, frame));
+
+  if (video_frame_map_.size() == pending_allocation_->n) {
+    pending_allocation_->task->Run();
+    delete pending_allocation_->task;
+    pending_allocation_.reset();
+  }
+}
+
 void GpuVideoDecoder::SendInitializeDone(
     const GpuVideoDecoderInitDoneParam& param) {
   if (!channel_->Send(
-      new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) {
+          new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) {
     LOG(ERROR) << "GpuVideoDecoderMsg_InitializeACK failed";
   }
 }
@@ -346,14 +321,14 @@ void GpuVideoDecoder::SendFlushDone() {
 
 void GpuVideoDecoder::SendEmptyBufferDone() {
   if (!channel_->Send(
-      new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) {
+          new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) {
     LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferDone failed";
   }
 }
 
 void GpuVideoDecoder::SendEmptyBufferACK() {
   if (!channel_->Send(
-      new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) {
+          new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) {
     LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferACK failed";
   }
 }
@@ -361,7 +336,23 @@ void GpuVideoDecoder::SendEmptyBufferACK() {
 void GpuVideoDecoder::SendFillBufferDone(
     const GpuVideoDecoderOutputBufferParam& param) {
   if (!channel_->Send(
-      new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), param))) {
+          new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), param))) {
     LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBufferDone failed";
   }
 }
+
+void GpuVideoDecoder::SendAllocateVideoFrames(
+    int n, size_t width, size_t height, media::VideoFrame::Format format) {
+  if (!channel_->Send(
+          new GpuVideoDecoderHostMsg_AllocateVideoFrames(
+              route_id(), n, width, height, format))) {
+    LOG(ERROR) << "GpuVideoDecoderMsg_AllocateVideoFrames failed";
+  }
+}
+
+void GpuVideoDecoder::SendReleaseAllVideoFrames() {
+  if (!channel_->Send(
+          new GpuVideoDecoderHostMsg_ReleaseAllVideoFrames(route_id()))) {
+    LOG(ERROR) << "GpuVideoDecoderMsg_ReleaseAllVideoFrames failed";
+  }
+}
diff --git a/chrome/gpu/gpu_video_decoder.h b/chrome/gpu/gpu_video_decoder.h
index 7aabffa..d4c9b09 100644
--- a/chrome/gpu/gpu_video_decoder.h
+++ b/chrome/gpu/gpu_video_decoder.h
@@ -5,6 +5,9 @@
 #ifndef CHROME_GPU_GPU_VIDEO_DECODER_H_
 #define CHROME_GPU_GPU_VIDEO_DECODER_H_
 
+#include <map>
+#include <vector>
+
 #include "base/basictypes.h"
 #include "base/callback.h"
 #include "base/ref_counted.h"
@@ -36,7 +39,7 @@ class GpuChannel;
 // In addition to delegating video related commamnds to VideoDecodeEngine it
 // has the following important functions:
 //
-// Buffer Allocation
+// BUFFER ALLOCATION
 //
 // VideoDecodeEngine requires platform specific video frame buffer to operate.
 // In order to abstract the platform specific bits GpuVideoDecoderContext is
@@ -63,20 +66,28 @@ class GpuChannel;
 // VideoFrame(s) from the textures.
 // 6. GpuVideoDecoder sends the VideoFrame(s) generated to VideoDecodeEngine.
 //
-// Buffer Translation
+// BUFFER UPLOADING
+//
+// A VideoDecodeEngine always produces some device specific buffer. In order to
+// use them in Chrome we always upload them to GL textures. The upload step is
+// different on each platform and each subsystem. We perform these special
+// upload steps by using GpuVideoDevice which are written for each
+// VideoDecodeEngine.
+//
+// BUFFER MAPPING
 //
 // GpuVideoDecoder will be working with VideoDecodeEngine, they exchange
-// buffers that are only meaningful to VideoDecodeEngine. In order to translate
-// that to something we can transport in the IPC channel we need a mapping
-// between VideoFrame and buffer ID known between GpuVideoDecoder and
+// buffers that are only meaningful to VideoDecodeEngine. In order to map that
+// to something we can transport in the IPC channel we need a mapping between
+// VideoFrame and buffer ID known between GpuVideoDecoder and
 // GpuVideoDecoderHost in the Renderer process.
 //
 // After texture allocation and VideoFrame allocation are done, GpuVideoDecoder
 // will maintain such mapping.
 //
 class GpuVideoDecoder
-    : public IPC::Channel::Listener,
-      public base::RefCountedThreadSafe<GpuVideoDecoder>,
+    : public base::RefCountedThreadSafe<GpuVideoDecoder>,
+      public IPC::Channel::Listener,
       public media::VideoDecodeEngine::EventHandler,
       public media::VideoDecodeContext {
 
@@ -98,10 +109,14 @@ class GpuVideoDecoder
 
   // VideoDecodeContext implementation.
   virtual void* GetDevice();
-  virtual void AllocateVideoFrames(int n, size_t width, size_t height,
-                                   AllocationCompleteCallback* callback);
-  virtual void ReleaseVideoFrames(int n, VideoFrame* frames);
-  virtual void Destroy(DestructionCompleteCallback* callback);
+  virtual void AllocateVideoFrames(
+      int n, size_t width, size_t height, media::VideoFrame::Format format,
+      std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task);
+  virtual void ReleaseAllVideoFrames();
+  virtual void UploadToVideoFrame(void* buffer,
+                                  scoped_refptr<media::VideoFrame> frame,
+                                  Task* task);
+  virtual void Destroy(Task* task);
 
   // Constructor and destructor.
   GpuVideoDecoder(const GpuVideoDecoderInfoParam* param,
@@ -111,19 +126,45 @@ class GpuVideoDecoder
   virtual ~GpuVideoDecoder() {}
 
  private:
+  struct PendingAllocation {
+    size_t n;
+    size_t width;
+    size_t height;
+    media::VideoFrame::Format format;
+    std::vector<scoped_refptr<media::VideoFrame> >* frames;
+    Task* task;
+  };
+
   int32 route_id() { return decoder_host_route_id_; }
 
   bool CreateInputTransferBuffer(uint32 size,
                                  base::SharedMemoryHandle* handle);
-  bool CreateOutputTransferBuffer(uint32 size,
-                                  base::SharedMemoryHandle* handle);
-  void CreateVideoFrameOnTransferBuffer();
+
+  // These methods are message handlers for the messages sent from the Renderer
+  // process.
+  void OnInitialize(const GpuVideoDecoderInitParam& param);
+  void OnUninitialize();
+  void OnFlush();
+  void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer);
+  void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& param);
+  void OnFillThisBufferDoneACK();
+  void OnVideoFrameAllocated(int32 frame_id, std::vector<uint32> textures);
+
+  // Helper methods for sending messages to the Renderer process.
+  void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param);
+  void SendUninitializeDone();
+  void SendFlushDone();
+  void SendEmptyBufferDone();
+  void SendEmptyBufferACK();
+  void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& param);
+  void SendAllocateVideoFrames(
+      int n, size_t width, size_t height, media::VideoFrame::Format format);
+  void SendReleaseAllVideoFrames();
 
   int32 decoder_host_route_id_;
 
   // Used only in system memory path. i.e. Remove this later.
   scoped_refptr<VideoFrame> frame_;
-  bool output_transfer_buffer_busy_;
   int32 pending_output_requests_;
 
   GpuChannel* channel_;
@@ -133,29 +174,26 @@ class GpuVideoDecoder
   // is used to switch context and translate client texture ID to service ID.
   gpu::gles2::GLES2Decoder* gles2_decoder_;
 
+  // Memory for transfering the input data for the hardware video decoder.
   scoped_ptr<base::SharedMemory> input_transfer_buffer_;
-  scoped_ptr<base::SharedMemory> output_transfer_buffer_;
 
+  // VideoDecodeEngine is used to do the actual video decoding.
   scoped_ptr<media::VideoDecodeEngine> decode_engine_;
-  scoped_ptr<GpuVideoDevice> decode_context_;
-  media::VideoCodecConfig config_;
-  media::VideoCodecInfo info_;
 
-  // Input message handler.
-  void OnInitialize(const GpuVideoDecoderInitParam& param);
-  void OnUninitialize();
-  void OnFlush();
-  void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer);
-  void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& param);
-  void OnFillThisBufferDoneACK();
+  // GpuVideoDevice is used to generate VideoFrame(s) from GL textures. The
+  // frames generated are understood by the decode engine.
+  scoped_ptr<GpuVideoDevice> video_device_;
 
-  // Output message helper.
-  void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param);
-  void SendUninitializeDone();
-  void SendFlushDone();
-  void SendEmptyBufferDone();
-  void SendEmptyBufferACK();
-  void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& param);
+  // Contain information for allocation VideoFrame(s).
+  scoped_ptr<PendingAllocation> pending_allocation_;
+
+  // Contains the mapping between a |frame_id| and VideoFrame generated by
+  // GpuVideoDevice from the associated GL textures.
+  typedef std::map<int32, scoped_refptr<media::VideoFrame> > VideoFrameMap;
+  VideoFrameMap video_frame_map_;
+
+  media::VideoCodecConfig config_;
+  media::VideoCodecInfo info_;
 
   DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoder);
 };
diff --git a/chrome/gpu/media/fake_gl_video_decode_engine.cc b/chrome/gpu/media/fake_gl_video_decode_engine.cc
index 52e2dfd..b3d093f 100644
--- a/chrome/gpu/media/fake_gl_video_decode_engine.cc
+++ b/chrome/gpu/media/fake_gl_video_decode_engine.cc
@@ -4,7 +4,8 @@
 
 #include "chrome/gpu/media/fake_gl_video_decode_engine.h"
 
-#include "app/gfx/gl/gl_bindings.h"
+#include "media/base/video_frame.h"
+#include "media/video/video_decode_context.h"
 
 FakeGlVideoDecodeEngine::FakeGlVideoDecodeEngine()
     : width_(0),
@@ -18,11 +19,33 @@ FakeGlVideoDecodeEngine::~FakeGlVideoDecodeEngine() {
 void FakeGlVideoDecodeEngine::Initialize(
     MessageLoop* message_loop,
     media::VideoDecodeEngine::EventHandler* event_handler,
+    media::VideoDecodeContext* context,
     const media::VideoCodecConfig& config) {
   handler_ = event_handler;
+  context_ = context;
   width_ = config.width;
   height_ = config.height;
 
+  // Create an internal VideoFrame that we can write to. This is going to be
+  // uploaded through VideoDecodeContext.
+  media::VideoFrame::CreateFrame(
+      media::VideoFrame::RGBA, width_, height_, base::TimeDelta(),
+      base::TimeDelta(), &internal_frame_);
+  memset(internal_frame_->data(media::VideoFrame::kRGBPlane), 0,
+         height_ * internal_frame_->stride(media::VideoFrame::kRGBPlane));
+
+  // Use VideoDecodeContext to allocate VideoFrame that can be consumed by
+  // external body.
+  context_->AllocateVideoFrames(
+      1, width_, height_, media::VideoFrame::RGBA, &external_frames_,
+      NewRunnableMethod(this,
+                        &FakeGlVideoDecodeEngine::AllocationCompleteTask));
+}
+
+void FakeGlVideoDecodeEngine::AllocationCompleteTask() {
+  DCHECK_EQ(1u, external_frames_.size());
+  DCHECK_EQ(media::VideoFrame::TYPE_GL_TEXTURE, external_frames_[0]->type());
+
   media::VideoCodecInfo info;
   info.success = true;
   info.provides_buffers = true;
@@ -30,9 +53,6 @@ void FakeGlVideoDecodeEngine::Initialize(
   info.stream_info.surface_type = media::VideoFrame::TYPE_GL_TEXTURE;
   info.stream_info.surface_width = width_;
   info.stream_info.surface_height = height_;
-
-  // TODO(hclam): When we have VideoDecodeContext we should use it to allocate
-  // video frames.
   handler_->OnInitializeComplete(info);
 }
 
@@ -62,7 +82,7 @@ void FakeGlVideoDecodeEngine::ProduceVideoFrame(
   scoped_array<uint8> buffer(new uint8[size]);
   memset(buffer.get(), 0, size);
 
-  uint8* row = buffer.get();
+  uint8* row = internal_frame_->data(media::VideoFrame::kRGBPlane);
   static int seed = 0;
 
   for (int y = 0; y < height_; ++y) {
@@ -75,14 +95,18 @@ void FakeGlVideoDecodeEngine::ProduceVideoFrame(
   }
   ++seed;
 
-  // Assume we are in the right context and then upload the content to the
-  // texture.
-  glBindTexture(GL_TEXTURE_2D,
-                frame->gl_texture(media::VideoFrame::kRGBPlane));
-  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width_, height_, 0, GL_RGBA,
-               GL_UNSIGNED_BYTE, buffer.get());
+  // After we have filled the content upload the internal frame to the
+  // VideoFrame allocated through VideoDecodeContext.
+  context_->UploadToVideoFrame(
+      internal_frame_, external_frames_[0],
+      NewRunnableMethod(this, &FakeGlVideoDecodeEngine::UploadCompleteTask,
+                        external_frames_[0]));
+}
 
-  // We have done generating data to the frame so give it to the handler.
-  // TODO(hclam): Advance the timestamp every time we call this method.
+void FakeGlVideoDecodeEngine::UploadCompleteTask(
+    scoped_refptr<media::VideoFrame> frame) {
+  // |frame| is the upload target. We can immediately send this frame out.
   handler_->ConsumeVideoFrame(frame);
 }
+
+DISABLE_RUNNABLE_METHOD_REFCOUNT(FakeGlVideoDecodeEngine);
diff --git a/chrome/gpu/media/fake_gl_video_decode_engine.h b/chrome/gpu/media/fake_gl_video_decode_engine.h
index c3eeb3e..164c8c4 100644
--- a/chrome/gpu/media/fake_gl_video_decode_engine.h
+++ b/chrome/gpu/media/fake_gl_video_decode_engine.h
@@ -5,10 +5,13 @@
 #ifndef CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DECODE_ENGINE_H_
 #define CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DECODE_ENGINE_H_
 
+#include <vector>
+
 #include "base/scoped_ptr.h"
 #include "media/video/video_decode_engine.h"
 
 namespace media {
+class VideoDecodeContext;
 class VideoFrame;
 }  // namespace media
 
@@ -20,6 +23,7 @@ class FakeGlVideoDecodeEngine : public media::VideoDecodeEngine {
   virtual void Initialize(
       MessageLoop* message_loop,
       media::VideoDecodeEngine::EventHandler* event_handler,
+      media::VideoDecodeContext* context,
       const media::VideoCodecConfig& config);
 
   virtual void Uninitialize();
@@ -29,9 +33,25 @@ class FakeGlVideoDecodeEngine : public media::VideoDecodeEngine {
   virtual void ProduceVideoFrame(scoped_refptr<media::VideoFrame> frame);
 
  private:
+  // This method is called when video frames allocation is completed by
+  // VideoDecodeContext.
+  void AllocationCompleteTask();
+
+  // This method is called by VideoDecodeContext when uploading to a VideoFrame
+  // has completed.
+  void UploadCompleteTask(scoped_refptr<media::VideoFrame> frame);
+
   int width_;
   int height_;
   media::VideoDecodeEngine::EventHandler* handler_;
+  media::VideoDecodeContext* context_;
+
+  // Internal video frame that is to be uploaded through VideoDecodeContext.
+  scoped_refptr<media::VideoFrame> internal_frame_;
+
+  // VideoFrame(s) allocated through VideoDecodeContext. These frames are
+  // opaque to us. And we need an extra upload step.
+  std::vector<scoped_refptr<media::VideoFrame> > external_frames_;
 
   DISALLOW_COPY_AND_ASSIGN(FakeGlVideoDecodeEngine);
 };
diff --git a/chrome/gpu/media/fake_gl_video_device.cc b/chrome/gpu/media/fake_gl_video_device.cc
new file mode 100644
index 0000000..df1b5b1
--- /dev/null
+++ b/chrome/gpu/media/fake_gl_video_device.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/gpu/media/fake_gl_video_device.h"
+
+#include "app/gfx/gl/gl_bindings.h"
+#include "media/base/video_frame.h"
+
+void* FakeGlVideoDevice::GetDevice() {
+  // No actual hardware device should be used.
+  return NULL;
+}
+
+bool FakeGlVideoDevice::CreateVideoFrameFromGlTextures(
+    size_t width, size_t height, media::VideoFrame::Format format,
+    const std::vector<media::VideoFrame::GlTexture>& textures,
+    scoped_refptr<media::VideoFrame>* frame) {
+  media::VideoFrame::GlTexture texture_array[media::VideoFrame::kMaxPlanes];
+  memset(texture_array, 0, sizeof(texture_array));
+
+  for (size_t i = 0; i < textures.size(); ++i) {
+    texture_array[i] = textures[i];
+  }
+
+  media::VideoFrame::CreateFrameGlTexture(format,
+                                          width,
+                                          height,
+                                          texture_array,
+                                          base::TimeDelta(),
+                                          base::TimeDelta(),
+                                          frame);
+  return *frame != NULL;
+}
+
+void FakeGlVideoDevice::ReleaseVideoFrame(
+    const scoped_refptr<media::VideoFrame>& frame) {
+  // We didn't need to anything here because we didin't allocate any resources
+  // for the VideoFrame(s) generated.
+}
+
+bool FakeGlVideoDevice::UploadToVideoFrame(
+    void* buffer, scoped_refptr<media::VideoFrame> frame) {
+  // Assume we are in the right context and then upload the content to the
+  // texture.
+  glBindTexture(GL_TEXTURE_2D,
+                frame->gl_texture(media::VideoFrame::kRGBPlane));
+
+  // |buffer| is also a VideoFrame.
+  scoped_refptr<media::VideoFrame> frame_to_upload(
+      reinterpret_cast<media::VideoFrame*>(buffer));
+  DCHECK_EQ(frame->width(), frame_to_upload->width());
+  DCHECK_EQ(frame->height(), frame_to_upload->height());
+  DCHECK_EQ(frame->format(), frame_to_upload->format());
+  glTexImage2D(
+      GL_TEXTURE_2D, 0, GL_RGBA, frame_to_upload->width(),
+      frame_to_upload->height(), 0, GL_RGBA,
+      GL_UNSIGNED_BYTE, frame_to_upload->data(media::VideoFrame::kRGBPlane));
+  return true;
+}
diff --git a/chrome/gpu/media/fake_gl_video_device.h b/chrome/gpu/media/fake_gl_video_device.h
new file mode 100644
index 0000000..711c3ef
--- /dev/null
+++ b/chrome/gpu/media/fake_gl_video_device.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DEVICE_H_
+#define CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DEVICE_H_
+
+#include "chrome/gpu/media/gpu_video_device.h"
+
+// A simple GpuVideoDevice that create VideoFrame with GL textures.
+// It uploads frames in RGBA format in system memory to the GL texture.
+class FakeGlVideoDevice : public GpuVideoDevice {
+ public:
+  virtual ~FakeGlVideoDevice() {}
+
+  virtual void* GetDevice();
+  virtual bool CreateVideoFrameFromGlTextures(
+      size_t width, size_t height, media::VideoFrame::Format format,
+      const std::vector<media::VideoFrame::GlTexture>& textures,
+      scoped_refptr<media::VideoFrame>* frame);
+  virtual void ReleaseVideoFrame(
+      const scoped_refptr<media::VideoFrame>& frame);
+  virtual bool UploadToVideoFrame(void* buffer,
+                                  scoped_refptr<media::VideoFrame> frame);
+};
+
+#endif  // CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DEVICE_H_
diff --git a/chrome/gpu/media/gpu_video_device.h b/chrome/gpu/media/gpu_video_device.h
index 7998070..0556903 100644
--- a/chrome/gpu/media/gpu_video_device.h
+++ b/chrome/gpu/media/gpu_video_device.h
@@ -5,6 +5,8 @@
 #ifndef CHROME_GPU_MEDIA_GPU_VIDEO_DEVICE_H_
 #define CHROME_GPU_MEDIA_GPU_VIDEO_DEVICE_H_
 
+#include <vector>
+
 #include "media/base/video_frame.h"
 #include "media/video/video_decode_context.h"
 
@@ -31,16 +33,25 @@ class GpuVideoDevice {
   //
   // VideoFrame generated is used by VideoDecodeEngine for output buffer.
   //
-  // |frames| will contain the set of VideoFrame(s) generated.
+  // |frame| will contain the VideoFrame generated.
   //
   // Return true if the operation was successful.
   virtual bool CreateVideoFrameFromGlTextures(
       size_t width, size_t height, media::VideoFrame::Format format,
-      media::VideoFrame::GlTexture const* textures,
+      const std::vector<media::VideoFrame::GlTexture>& textures,
      scoped_refptr<media::VideoFrame>* frame) = 0;
 
   // Release VideoFrame generated.
-  virtual void ReleaseVideoFrame(scoped_refptr<media::VideoFrame> frame) = 0;
+  virtual void ReleaseVideoFrame(
+      const scoped_refptr<media::VideoFrame>& frame) = 0;
+
+  // Upload a device specific buffer to a VideoFrame object that can be used in
+  // the GPU process.
+  //
+  // Return true if successful.
+  // TODO(hclam): Rename this to ConvertToVideoFrame().
+  virtual bool UploadToVideoFrame(void* buffer,
+                                  scoped_refptr<media::VideoFrame> frame) = 0;
 };
 
 #endif  // CHROME_GPU_MEDIA_GPU_VIDEO_DEVICE_H_