Diffstat (limited to 'chrome')
-rw-r--r-- | chrome/common/gpu_messages_internal.h               |   2
-rw-r--r-- | chrome/common/gpu_video_common.cc                   |  35
-rw-r--r-- | chrome/common/gpu_video_common.h                    |  18
-rw-r--r-- | chrome/gpu/gpu_video_decoder.cc                     | 183
-rw-r--r-- | chrome/gpu/gpu_video_decoder.h                      |  83
-rw-r--r-- | chrome/renderer/gpu_video_decoder_host.cc           |  37
-rw-r--r-- | chrome/renderer/gpu_video_decoder_host.h            |   1
-rw-r--r-- | chrome/renderer/media/gles2_video_decode_context.cc |   8
-rw-r--r-- | chrome/renderer/media/gles2_video_decode_context.h  |   9
-rw-r--r-- | chrome/renderer/media/ipc_video_decoder.cc          |   7
10 files changed, 163 insertions, 220 deletions
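
Most of this change re-plumbs the decoder IPC: an output buffer is now identified by a frame_id instead of carrying a raw GL texture, and the shared-memory output transfer buffer disappears entirely. For orientation, a minimal sketch of the serializer for the slimmed-down output-buffer param is shown below. It restates what the patch does in chrome/common/gpu_video_common.cc; the full-specialization packaging is illustrative and not copied from the tree.

namespace IPC {

template <>
struct ParamTraits<GpuVideoDecoderOutputBufferParam> {
  typedef GpuVideoDecoderOutputBufferParam param_type;

  static void Write(Message* m, const param_type& p) {
    WriteParam(m, p.frame_id);   // Identifies the pre-allocated VideoFrame.
    WriteParam(m, p.timestamp);  // Microseconds.
    WriteParam(m, p.duration);   // Microseconds.
    WriteParam(m, p.flags);      // E.g. kFlagsEndOfStream.
  }

  static bool Read(const Message* m, void** iter, param_type* r) {
    return ReadParam(m, iter, &r->frame_id) &&
           ReadParam(m, iter, &r->timestamp) &&
           ReadParam(m, iter, &r->duration) &&
           ReadParam(m, iter, &r->flags);
  }

  static void Log(const param_type& p, std::string* l) {
    l->append(StringPrintf("(%d %d %d %x)", p.frame_id,
                           static_cast<int>(p.timestamp),
                           static_cast<int>(p.duration), p.flags));
  }
};

}  // namespace IPC
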
diff --git a/chrome/common/gpu_messages_internal.h b/chrome/common/gpu_messages_internal.h index 5a4d505..229e3db 100644 --- a/chrome/common/gpu_messages_internal.h +++ b/chrome/common/gpu_messages_internal.h @@ -336,7 +336,7 @@ IPC_BEGIN_MESSAGES(GpuVideoDecoderHost) GpuVideoDecoderOutputBufferParam) // Allocate video frames for output of the hardware video decoder. - IPC_MESSAGE_ROUTED4(GpuVideoDecoderHostMsg_AllocateVideoFrame, + IPC_MESSAGE_ROUTED4(GpuVideoDecoderHostMsg_AllocateVideoFrames, int32, /* Numer of video frames to generate */ int32, /* Width of the video frame */ int32, /* Height of the video frame */ diff --git a/chrome/common/gpu_video_common.cc b/chrome/common/gpu_video_common.cc index 826ef20..5c2e9257 100644 --- a/chrome/common/gpu_video_common.cc +++ b/chrome/common/gpu_video_common.cc @@ -86,32 +86,22 @@ void ParamTraits<GpuVideoDecoderInitParam>::Log( void ParamTraits<GpuVideoDecoderInitDoneParam>::Write( Message* m, const GpuVideoDecoderInitDoneParam& p) { WriteParam(m, p.success); - WriteParam(m, p.stride); - WriteParam(m, p.format); - WriteParam(m, p.surface_type); WriteParam(m, p.input_buffer_size); - WriteParam(m, p.output_buffer_size); WriteParam(m, p.input_buffer_handle); - WriteParam(m, p.output_buffer_handle); } bool ParamTraits<GpuVideoDecoderInitDoneParam>::Read( const Message* m, void** iter, GpuVideoDecoderInitDoneParam* r) { if (!ReadParam(m, iter, &r->success) || - !ReadParam(m, iter, &r->stride) || - !ReadParam(m, iter, &r->format) || - !ReadParam(m, iter, &r->surface_type) || !ReadParam(m, iter, &r->input_buffer_size) || - !ReadParam(m, iter, &r->output_buffer_size) || - !ReadParam(m, iter, &r->input_buffer_handle) || - !ReadParam(m, iter, &r->output_buffer_handle)) + !ReadParam(m, iter, &r->input_buffer_handle)) return false; return true; } void ParamTraits<GpuVideoDecoderInitDoneParam>::Log( const GpuVideoDecoderInitDoneParam& p, std::string* l) { - l->append(StringPrintf("(%d)", p.stride)); + l->append(StringPrintf("(%d %d)", p.success, p.input_buffer_size)); } /////////////////////////////////////////////////////////////////////////////// @@ -143,29 +133,29 @@ void ParamTraits<GpuVideoDecoderInputBufferParam>::Log( void ParamTraits<GpuVideoDecoderOutputBufferParam>::Write( Message* m, const GpuVideoDecoderOutputBufferParam& p) { + WriteParam(m, p.frame_id); WriteParam(m, p.timestamp); WriteParam(m, p.duration); WriteParam(m, p.flags); - WriteParam(m, p.texture); } bool ParamTraits<GpuVideoDecoderOutputBufferParam>::Read( const Message* m, void** iter, GpuVideoDecoderOutputBufferParam* r) { - if (!ReadParam(m, iter, &r->timestamp) || + if (!ReadParam(m, iter, &r->frame_id) || + !ReadParam(m, iter, &r->timestamp) || !ReadParam(m, iter, &r->duration) || - !ReadParam(m, iter, &r->flags) || - !ReadParam(m, iter, &r->texture)) + !ReadParam(m, iter, &r->flags)) return false; return true; } void ParamTraits<GpuVideoDecoderOutputBufferParam>::Log( const GpuVideoDecoderOutputBufferParam& p, std::string* l) { - l->append(StringPrintf("(%d %d) %x texture = x%d", + l->append(StringPrintf("(%d %d %d %x)", + p.frame_id, static_cast<int>(p.timestamp), static_cast<int>(p.duration), - p.flags, - p.texture)); + p.flags)); } /////////////////////////////////////////////////////////////////////////////// @@ -192,21 +182,18 @@ void ParamTraits<GpuVideoDecoderErrorInfoParam>::Log( void ParamTraits<GpuVideoDecoderFormatChangeParam>::Write( Message* m, const GpuVideoDecoderFormatChangeParam& p) { WriteParam(m, p.input_buffer_size); - WriteParam(m, p.output_buffer_size); 
} bool ParamTraits<GpuVideoDecoderFormatChangeParam>::Read( const Message* m, void** iter, GpuVideoDecoderFormatChangeParam* r) { - if (!ReadParam(m, iter, &r->input_buffer_size) || - !ReadParam(m, iter, &r->output_buffer_size)) + if (!ReadParam(m, iter, &r->input_buffer_size)) return false; return true; } void ParamTraits<GpuVideoDecoderFormatChangeParam>::Log( const GpuVideoDecoderFormatChangeParam& p, std::string* l) { - l->append(StringPrintf("(%d %d)", p.input_buffer_size, - p.output_buffer_size)); + l->append(StringPrintf("%d", p.input_buffer_size)); } /////////////////////////////////////////////////////////////////////////////// diff --git a/chrome/common/gpu_video_common.h b/chrome/common/gpu_video_common.h index ee3f1e3..8102f3d 100644 --- a/chrome/common/gpu_video_common.h +++ b/chrome/common/gpu_video_common.h @@ -48,15 +48,8 @@ struct GpuVideoDecoderInitParam { struct GpuVideoDecoderInitDoneParam { int32 success; // other parameter is only meaningful when this is true. - int32 provides_buffer; - media::VideoFrame::Format format; - int32 surface_type; // TODO(hclam): Remove this. We only pass GL textures. - int32 stride; int32 input_buffer_size; - int32 output_buffer_size; base::SharedMemoryHandle input_buffer_handle; - // we do not need this if hardware composition is ready. - base::SharedMemoryHandle output_buffer_handle; }; struct GpuVideoDecoderInputBufferParam { @@ -66,16 +59,14 @@ struct GpuVideoDecoderInputBufferParam { int32 flags; // miscellaneous flag bit mask }; +// A message that contains formation of a video frame that is ready to be +// rendered by the Renderer process. struct GpuVideoDecoderOutputBufferParam { + int32 frame_id; // ID of the video frame that is ready to be rendered. int64 timestamp; // In unit of microseconds. int64 duration; // In unit of microseconds. int32 flags; // miscellaneous flag bit mask - // TODO(hclam): This is really ugly and should be removed. Instead of sending - // a texture id we should send a buffer id that signals that a buffer is ready - // to be consumed. Before that we need API to establish the buffers. - int32 texture; - enum { kFlagsEndOfStream = 0x00000001, kFlagsDiscontinuous = 0x00000002, @@ -88,11 +79,8 @@ struct GpuVideoDecoderErrorInfoParam { // TODO(jiesun): define this. 
struct GpuVideoDecoderFormatChangeParam { - int32 stride; int32 input_buffer_size; - int32 output_buffer_size; base::SharedMemoryHandle input_buffer_handle; - base::SharedMemoryHandle output_buffer_handle; }; namespace IPC { diff --git a/chrome/gpu/gpu_video_decoder.cc b/chrome/gpu/gpu_video_decoder.cc index bcdce82..eb1823a 100644 --- a/chrome/gpu/gpu_video_decoder.cc +++ b/chrome/gpu/gpu_video_decoder.cc @@ -53,63 +53,17 @@ bool GpuVideoDecoder::CreateInputTransferBuffer( return true; } -bool GpuVideoDecoder::CreateOutputTransferBuffer( - uint32 size, - base::SharedMemoryHandle* handle) { - output_transfer_buffer_.reset(new base::SharedMemory); - if (!output_transfer_buffer_.get()) - return false; - - if (!output_transfer_buffer_->Create(std::wstring(), false, false, size)) - return false; - - if (!output_transfer_buffer_->Map(size)) - return false; - - if (!output_transfer_buffer_->ShareToProcess(renderer_handle_, handle)) - return false; - - return true; -} - -void GpuVideoDecoder::CreateVideoFrameOnTransferBuffer() { - const base::TimeDelta kZero; - uint8* data[media::VideoFrame::kMaxPlanes]; - int32 strides[media::VideoFrame::kMaxPlanes]; - memset(data, 0, sizeof(data)); - memset(strides, 0, sizeof(strides)); - data[0] = static_cast<uint8*>(output_transfer_buffer_->memory()); - data[1] = data[0] + config_.width * config_.height; - data[2] = data[1] + config_.width * config_.height / 4; - strides[0] = config_.width; - strides[1] = strides[2] = config_.width >> 1; - media::VideoFrame:: CreateFrameExternal( - media::VideoFrame::TYPE_SYSTEM_MEMORY, - media::VideoFrame::YV12, - config_.width, config_.height, 3, - data, strides, - kZero, kZero, - NULL, - &frame_); -} - void GpuVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) { info_ = info; GpuVideoDecoderInitDoneParam param; param.success = false; param.input_buffer_handle = base::SharedMemory::NULLHandle(); - param.output_buffer_handle = base::SharedMemory::NULLHandle(); if (!info.success) { SendInitializeDone(param); return; } - // Translate surface type. - // TODO(hclam): Remove |surface_type| since we are always passing textures. - param.surface_type = static_cast<int>(info.stream_info.surface_type); - param.format = info.stream_info.surface_format; - // TODO(jiesun): Check the assumption of input size < original size. param.input_buffer_size = config_.width * config_.height * 3 / 2; if (!CreateInputTransferBuffer(param.input_buffer_size, @@ -118,31 +72,7 @@ void GpuVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) { return; } - if (info.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) { - // TODO(jiesun): Allocate this according to the surface format. - // The format actually could change during streaming, we need to - // notify GpuVideoDecoderHost side when this happened and renegotiate - // the transfer buffer. - switch (info.stream_info.surface_format) { - case VideoFrame::YV12: - // TODO(jiesun): take stride into account. - param.output_buffer_size = - config_.width * config_.height * 3 / 2; - break; - default: - NOTREACHED(); - } - - if (!CreateOutputTransferBuffer(param.output_buffer_size, - ¶m.output_buffer_handle)) { - SendInitializeDone(param); - return; - } - CreateVideoFrameOnTransferBuffer(); - } - param.success = true; - SendInitializeDone(param); } @@ -176,21 +106,16 @@ void GpuVideoDecoder::ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) { output_param.duration = frame->GetDuration().InMicroseconds(); output_param.flags = frame->IsEndOfStream() ? 
GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream : 0; - // TODO(hclam): We should have the conversion between VideoFrame and the - // IPC transport param done in GpuVideoDevice. - // This is a hack to pass texture back as a param. - output_param.texture = frame->gl_texture(media::VideoFrame::kRGBPlane); SendFillBufferDone(output_param); } void* GpuVideoDecoder::GetDevice() { - // Simply delegate the method call to GpuVideoDevice. - return decode_context_->GetDevice(); + return video_device_->GetDevice(); } void GpuVideoDecoder::AllocateVideoFrames( - int n, size_t width, size_t height, - AllocationCompleteCallback* callback) { + int n, size_t width, size_t height, media::VideoFrame::Format format, + std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task) { // Since the communication between Renderer and GPU process is by GL textures. // We need to obtain a set of GL textures by sending IPC commands to the // Renderer process. The recipient of these commands will be IpcVideoDecoder. @@ -208,20 +133,37 @@ void GpuVideoDecoder::AllocateVideoFrames( // // Note that this method is called when there's no video frames allocated or // they were all released. + DCHECK(video_frame_map_.empty()); + + // Save the parameters for allocation. + pending_allocation_.reset(new PendingAllocation()); + pending_allocation_->n = n; + pending_allocation_->width = width; + pending_allocation_->height = height; + pending_allocation_->format = format; + pending_allocation_->frames = frames; + pending_allocation_->task = task; + SendAllocateVideoFrames(n, width, height, format); } -void GpuVideoDecoder::ReleaseVideoFrames(int n, VideoFrame* frames) { +void GpuVideoDecoder::ReleaseAllVideoFrames() { // This method will first call to GpuVideoDevice to release all the resource // associated with a VideoFrame. // - // And when we'll call GpuVideoDevice::ReleaseVideoFrames to remove the set + // And then we'll call GpuVideoDevice::ReleaseVideoFrame() to remove the set // of Gl textures associated with the context. // // And finally we'll send IPC commands to IpcVideoDecoder to destroy all // GL textures generated. + for (VideoFrameMap::iterator i = video_frame_map_.begin(); + i != video_frame_map_.end(); ++i) { + video_device_->ReleaseVideoFrame(i->second); + } + video_frame_map_.clear(); + SendReleaseAllVideoFrames(); } -void GpuVideoDecoder::Destroy(DestructionCompleteCallback* callback) { +void GpuVideoDecoder::Destroy(Task* task) { // TODO(hclam): I still need to think what I should do here. } @@ -231,7 +173,6 @@ GpuVideoDecoder::GpuVideoDecoder( base::ProcessHandle handle, gpu::gles2::GLES2Decoder* decoder) : decoder_host_route_id_(param->decoder_host_route_id), - output_transfer_buffer_busy_(false), pending_output_requests_(0), channel_(channel), renderer_handle_(handle), @@ -260,8 +201,6 @@ void GpuVideoDecoder::OnUninitialize() { } void GpuVideoDecoder::OnFlush() { - // TODO(jiesun): this is wrong?? 
- output_transfer_buffer_busy_ = false; pending_output_requests_ = 0; decode_engine_->Flush(); @@ -292,42 +231,56 @@ void GpuVideoDecoder::OnFillThisBuffer( if (info_.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) { pending_output_requests_++; - if (!output_transfer_buffer_busy_) { - output_transfer_buffer_busy_ = true; - decode_engine_->ProduceVideoFrame(frame_); - } } else { - // TODO(hclam): I need to rethink how to delegate calls to - // VideoDecodeEngine, I may need to create a GpuVideoDecodeContext that - // provides a method for me to make calls to VideoDecodeEngine with the - // correct VideoFrame. - DCHECK_EQ(VideoFrame::TYPE_GL_TEXTURE, info_.stream_info.surface_type); - - scoped_refptr<media::VideoFrame> frame; - VideoFrame::GlTexture textures[3] = { param.texture, 0, 0 }; - - media::VideoFrame:: CreateFrameGlTexture( - media::VideoFrame::RGBA, config_.width, config_.height, textures, - base::TimeDelta(), base::TimeDelta(), &frame); - decode_engine_->ProduceVideoFrame(frame); } } void GpuVideoDecoder::OnFillThisBufferDoneACK() { if (info_.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) { - output_transfer_buffer_busy_ = false; pending_output_requests_--; if (pending_output_requests_) { - output_transfer_buffer_busy_ = true; decode_engine_->ProduceVideoFrame(frame_); } } } +void GpuVideoDecoder::OnVideoFrameAllocated(int32 frame_id, + std::vector<uint32> textures) { + // This method is called in response to a video frame allocation request sent + // to the Renderer process. + // We should use the textures to generate a VideoFrame by using + // GpuVideoDevice. The VideoFrame created is added to the internal map. + // If we have generated enough VideoFrame, we call |allocation_callack_| to + // complete the allocation process. + media::VideoFrame::GlTexture gl_textures[media::VideoFrame::kMaxPlanes]; + memset(gl_textures, 0, sizeof(gl_textures)); + for (size_t i = 0; i < textures.size(); ++i) { + // Translate the client texture id to service texture id. 
+ bool ret = gles2_decoder_->GetServiceTextureId(textures[i], + gl_textures + i); + DCHECK(ret) << "Cannot translate client texture ID to service ID"; + } + + scoped_refptr<media::VideoFrame> frame; + bool ret = video_device_->CreateVideoFrameFromGlTextures( + pending_allocation_->width, pending_allocation_->height, + pending_allocation_->format, gl_textures, &frame); + + DCHECK(ret) << "Failed to allocation VideoFrame from GL textures)"; + pending_allocation_->frames->push_back(frame); + video_frame_map_.insert(std::make_pair(frame_id, frame)); + + if (video_frame_map_.size() == pending_allocation_->n) { + pending_allocation_->task->Run(); + delete pending_allocation_->task; + pending_allocation_.reset(); + } +} + void GpuVideoDecoder::SendInitializeDone( const GpuVideoDecoderInitDoneParam& param) { if (!channel_->Send( - new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) { + new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) { LOG(ERROR) << "GpuVideoDecoderMsg_InitializeACK failed"; } } @@ -346,14 +299,14 @@ void GpuVideoDecoder::SendFlushDone() { void GpuVideoDecoder::SendEmptyBufferDone() { if (!channel_->Send( - new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) { + new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) { LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferDone failed"; } } void GpuVideoDecoder::SendEmptyBufferACK() { if (!channel_->Send( - new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) { + new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) { LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferACK failed"; } } @@ -361,7 +314,23 @@ void GpuVideoDecoder::SendEmptyBufferACK() { void GpuVideoDecoder::SendFillBufferDone( const GpuVideoDecoderOutputBufferParam& param) { if (!channel_->Send( - new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), param))) { + new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), param))) { LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBufferDone failed"; } } + +void GpuVideoDecoder::SendAllocateVideoFrames( + int n, size_t width, size_t height, media::VideoFrame::Format format) { + if (!channel_->Send( + new GpuVideoDecoderHostMsg_AllocateVideoFrames( + route_id(), n, width, height, format))) { + LOG(ERROR) << "GpuVideoDecoderMsg_AllocateVideoFrames failed"; + } +} + +void GpuVideoDecoder::SendReleaseAllVideoFrames() { + if (!channel_->Send( + new GpuVideoDecoderHostMsg_ReleaseAllVideoFrames(route_id()))) { + LOG(ERROR) << "GpuVideoDecoderMsg_ReleaseAllVideoFrames failed"; + } +} diff --git a/chrome/gpu/gpu_video_decoder.h b/chrome/gpu/gpu_video_decoder.h index 7aabffa..9f77700 100644 --- a/chrome/gpu/gpu_video_decoder.h +++ b/chrome/gpu/gpu_video_decoder.h @@ -5,6 +5,9 @@ #ifndef CHROME_GPU_GPU_VIDEO_DECODER_H_ #define CHROME_GPU_GPU_VIDEO_DECODER_H_ +#include <map> +#include <vector> + #include "base/basictypes.h" #include "base/callback.h" #include "base/ref_counted.h" @@ -75,8 +78,8 @@ class GpuChannel; // will maintain such mapping. // class GpuVideoDecoder - : public IPC::Channel::Listener, - public base::RefCountedThreadSafe<GpuVideoDecoder>, + : public base::RefCountedThreadSafe<GpuVideoDecoder>, + public IPC::Channel::Listener, public media::VideoDecodeEngine::EventHandler, public media::VideoDecodeContext { @@ -98,10 +101,11 @@ class GpuVideoDecoder // VideoDecodeContext implementation. 
virtual void* GetDevice(); - virtual void AllocateVideoFrames(int n, size_t width, size_t height, - AllocationCompleteCallback* callback); - virtual void ReleaseVideoFrames(int n, VideoFrame* frames); - virtual void Destroy(DestructionCompleteCallback* callback); + virtual void AllocateVideoFrames( + int n, size_t width, size_t height, media::VideoFrame::Format format, + std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task); + virtual void ReleaseAllVideoFrames(); + virtual void Destroy(Task* task); // Constructor and destructor. GpuVideoDecoder(const GpuVideoDecoderInfoParam* param, @@ -111,19 +115,45 @@ class GpuVideoDecoder virtual ~GpuVideoDecoder() {} private: + struct PendingAllocation { + size_t n; + size_t width; + size_t height; + media::VideoFrame::Format format; + std::vector<scoped_refptr<media::VideoFrame> >* frames; + Task* task; + }; + int32 route_id() { return decoder_host_route_id_; } bool CreateInputTransferBuffer(uint32 size, base::SharedMemoryHandle* handle); - bool CreateOutputTransferBuffer(uint32 size, - base::SharedMemoryHandle* handle); - void CreateVideoFrameOnTransferBuffer(); + + // These methods are message handlers for the messages sent from the Renderer + // process. + void OnInitialize(const GpuVideoDecoderInitParam& param); + void OnUninitialize(); + void OnFlush(); + void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer); + void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& param); + void OnFillThisBufferDoneACK(); + void OnVideoFrameAllocated(int32 frame_id, std::vector<uint32> textures); + + // Helper methods for sending messages to the Renderer process. + void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param); + void SendUninitializeDone(); + void SendFlushDone(); + void SendEmptyBufferDone(); + void SendEmptyBufferACK(); + void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& param); + void SendAllocateVideoFrames( + int n, size_t width, size_t height, media::VideoFrame::Format format); + void SendReleaseAllVideoFrames(); int32 decoder_host_route_id_; // Used only in system memory path. i.e. Remove this later. scoped_refptr<VideoFrame> frame_; - bool output_transfer_buffer_busy_; int32 pending_output_requests_; GpuChannel* channel_; @@ -133,29 +163,26 @@ class GpuVideoDecoder // is used to switch context and translate client texture ID to service ID. gpu::gles2::GLES2Decoder* gles2_decoder_; + // Memory for transfering the input data for the hardware video decoder. scoped_ptr<base::SharedMemory> input_transfer_buffer_; - scoped_ptr<base::SharedMemory> output_transfer_buffer_; + // VideoDecodeEngine is used to do the actual video decoding. scoped_ptr<media::VideoDecodeEngine> decode_engine_; - scoped_ptr<GpuVideoDevice> decode_context_; - media::VideoCodecConfig config_; - media::VideoCodecInfo info_; - // Input message handler. - void OnInitialize(const GpuVideoDecoderInitParam& param); - void OnUninitialize(); - void OnFlush(); - void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer); - void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& param); - void OnFillThisBufferDoneACK(); + // GpuVideoDevice is used to generate VideoFrame(s) from GL textures. The + // frames generated are understood by the decode engine. + scoped_ptr<GpuVideoDevice> video_device_; - // Output message helper. 
- void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param); - void SendUninitializeDone(); - void SendFlushDone(); - void SendEmptyBufferDone(); - void SendEmptyBufferACK(); - void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& param); + // Contain information for allocation VideoFrame(s). + scoped_ptr<PendingAllocation> pending_allocation_; + + // Contains the mapping between a |frame_id| and VideoFrame generated by + // GpuVideoDevice from the associated GL textures. + typedef std::map<int32, scoped_refptr<media::VideoFrame> > VideoFrameMap; + VideoFrameMap video_frame_map_; + + media::VideoCodecConfig config_; + media::VideoCodecInfo info_; DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoder); }; diff --git a/chrome/renderer/gpu_video_decoder_host.cc b/chrome/renderer/gpu_video_decoder_host.cc index a585ede..771c38a 100644 --- a/chrome/renderer/gpu_video_decoder_host.cc +++ b/chrome/renderer/gpu_video_decoder_host.cc @@ -109,12 +109,9 @@ void GpuVideoDecoderHost::FillThisBuffer(scoped_refptr<VideoFrame> frame) { // TODO(hclam): We should keep an IDMap to convert between a frame a buffer // ID so that we can signal GpuVideoDecoder in GPU process to use the buffer. // This eliminates one conversion step. + // TODO(hclam): Fill the param. GpuVideoDecoderOutputBufferParam param; - // TODO(hclam): This is a hack to pass the texture id to the hardware video - // decoder. We should have created a mapping between VideoFrame and buffer id - // and we pass the buffer id to the GPU process. - param.texture = frame->gl_texture(VideoFrame::kRGBPlane); if (!channel_host_ || !channel_host_->Send( new GpuVideoDecoderMsg_FillThisBuffer(route_id(), param))) { LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBuffer failed"; @@ -152,13 +149,6 @@ void GpuVideoDecoderHost::OnInitializeDone( if (!input_transfer_buffer_->Map(param.input_buffer_size)) break; - if (!base::SharedMemory::IsHandleValid(param.output_buffer_handle)) - break; - output_transfer_buffer_.reset( - new base::SharedMemory(param.output_buffer_handle, false)); - if (!output_transfer_buffer_->Map(param.output_buffer_size)) - break; - success = true; } while (0); @@ -168,7 +158,6 @@ void GpuVideoDecoderHost::OnInitializeDone( void GpuVideoDecoderHost::OnUninitializeDone() { input_transfer_buffer_.reset(); - output_transfer_buffer_.reset(); event_handler_->OnUninitializeDone(); } @@ -189,27 +178,11 @@ void GpuVideoDecoderHost::OnFillThisBufferDone( if (param.flags & GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream) { VideoFrame::CreateEmptyFrame(&frame); - } else if (done_param_.surface_type == - media::VideoFrame::TYPE_SYSTEM_MEMORY) { - VideoFrame::CreateFrame(VideoFrame::YV12, - init_param_.width, - init_param_.height, - base::TimeDelta::FromMicroseconds(param.timestamp), - base::TimeDelta::FromMicroseconds(param.duration), - &frame); - uint8* src = static_cast<uint8*>(output_transfer_buffer_->memory()); - uint8* data0 = frame->data(0); - uint8* data1 = frame->data(1); - uint8* data2 = frame->data(2); - int32 size = init_param_.width * init_param_.height; - memcpy(data0, src, size); - memcpy(data1, src + size, size / 4); - memcpy(data2, src + size + size / 4, size / 4); - } else if (done_param_.surface_type == media::VideoFrame::TYPE_GL_TEXTURE) { + } else { // TODO(hclam): The logic in buffer allocation is pretty much around - // using shared memory for output buffer which needs to be adjusted. For - // now we have to add this hack to get the texture id. 
- VideoFrame::GlTexture textures[3] = { param.texture, 0, 0 }; + // using shared memory for output buffer which needs to be adjusted. + // Fake the texture ID until we implement it properly. + VideoFrame::GlTexture textures[3] = { 0, 0, 0 }; media::VideoFrame::CreateFrameGlTexture( media::VideoFrame::RGBA, init_param_.width, init_param_.height, textures, diff --git a/chrome/renderer/gpu_video_decoder_host.h b/chrome/renderer/gpu_video_decoder_host.h index 02f8fc8..1255bf5 100644 --- a/chrome/renderer/gpu_video_decoder_host.h +++ b/chrome/renderer/gpu_video_decoder_host.h @@ -112,7 +112,6 @@ class GpuVideoDecoderHost // Transfer buffers for both input and output. // TODO(jiesun): remove output buffer when hardware composition is ready. scoped_ptr<base::SharedMemory> input_transfer_buffer_; - scoped_ptr<base::SharedMemory> output_transfer_buffer_; DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoderHost); }; diff --git a/chrome/renderer/media/gles2_video_decode_context.cc b/chrome/renderer/media/gles2_video_decode_context.cc index 075f80a..2c04282 100644 --- a/chrome/renderer/media/gles2_video_decode_context.cc +++ b/chrome/renderer/media/gles2_video_decode_context.cc @@ -20,15 +20,15 @@ void* Gles2VideoDecodeContext::GetDevice() { } void Gles2VideoDecodeContext::AllocateVideoFrames( - int n, size_t width, size_t height, AllocationCompleteCallback* callback) { + int n, size_t width, size_t height, media::VideoFrame::Format format, + std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task) { // TODO(hclam): Implement. } -void Gles2VideoDecodeContext::ReleaseVideoFrames(int n, - media::VideoFrame* frames) { +void Gles2VideoDecodeContext::ReleaseAllVideoFrames() { // TODO(hclam): Implement. } -void Gles2VideoDecodeContext::Destroy(DestructionCompleteCallback* callback) { +void Gles2VideoDecodeContext::Destroy(Task* task) { // TODO(hclam): Implement. } diff --git a/chrome/renderer/media/gles2_video_decode_context.h b/chrome/renderer/media/gles2_video_decode_context.h index e087bb3..35958b6 100644 --- a/chrome/renderer/media/gles2_video_decode_context.h +++ b/chrome/renderer/media/gles2_video_decode_context.h @@ -95,10 +95,11 @@ class Gles2VideoDecodeContext : public media::VideoDecodeContext { // media::VideoDecodeContext implementation. 
virtual void* GetDevice(); - virtual void AllocateVideoFrames(int n, size_t width, size_t height, - AllocationCompleteCallback* callback); - virtual void ReleaseVideoFrames(int n, media::VideoFrame* frames); - virtual void Destroy(DestructionCompleteCallback* callback); + virtual void AllocateVideoFrames( + int n, size_t width, size_t height, media::VideoFrame::Format format, + std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task); + virtual void ReleaseAllVideoFrames(); + virtual void Destroy(Task* task) = 0; //-------------------------------------------------------------------------- // Any thread diff --git a/chrome/renderer/media/ipc_video_decoder.cc b/chrome/renderer/media/ipc_video_decoder.cc index 0b6896b..eda8696 100644 --- a/chrome/renderer/media/ipc_video_decoder.cc +++ b/chrome/renderer/media/ipc_video_decoder.cc @@ -111,10 +111,9 @@ void IpcVideoDecoder::OnInitializeDone( media::mime_type::kUncompressedVideo); media_format_.SetAsInteger(media::MediaFormat::kWidth, width_); media_format_.SetAsInteger(media::MediaFormat::kHeight, height_); - media_format_.SetAsInteger(media::MediaFormat::kSurfaceType, - static_cast<int>(param.surface_type)); - media_format_.SetAsInteger(media::MediaFormat::kSurfaceFormat, - static_cast<int>(param.format)); + media_format_.SetAsInteger( + media::MediaFormat::kSurfaceType, + static_cast<int>(media::VideoFrame::TYPE_GL_TEXTURE)); state_ = kPlaying; } else { LOG(ERROR) << "IpcVideoDecoder initialization failed!"; |
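
The other half of the interface change is that VideoDecodeContext::AllocateVideoFrames() now takes a pixel format, an out-vector of frames, and a completion Task, and release becomes all-or-nothing via ReleaseAllVideoFrames(). The caller-side sketch below is hypothetical: only the two method signatures, the std::vector<scoped_refptr<media::VideoFrame> > type, and media::VideoFrame::RGBA come from the patch; the function names, header paths, and use of NewRunnableFunction are assumptions for illustration.

#include <vector>

#include "base/logging.h"
#include "base/ref_counted.h"
#include "base/task.h"
#include "media/base/video_frame.h"
#include "media/video/video_decode_context.h"  // Assumed location of the interface.

typedef std::vector<scoped_refptr<media::VideoFrame> > FrameVector;

// Hypothetical completion callback: runs once the context has wrapped every
// GL texture in a media::VideoFrame and filled |frames|.
static void OnFramesAllocated(FrameVector* frames) {
  LOG(INFO) << "Allocated " << frames->size() << " output frames";
}

// Hypothetical helper showing the asynchronous allocation handshake.
void RequestOutputFrames(media::VideoDecodeContext* context,
                         FrameVector* frames,
                         size_t width, size_t height) {
  // Ask for three RGBA texture-backed frames; the context signals completion
  // by running the Task once all of them have been created.
  context->AllocateVideoFrames(3, width, height, media::VideoFrame::RGBA,
                               frames,
                               NewRunnableFunction(&OnFramesAllocated, frames));
}

// When the stream shuts down or the format changes, all frames must be
// returned before the context can destroy the underlying GL textures.
void DropOutputFrames(media::VideoDecodeContext* context, FrameVector* frames) {
  frames->clear();
  context->ReleaseAllVideoFrames();
}
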