summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--chrome/common/gpu_messages_internal.h2
-rw-r--r--chrome/common/gpu_video_common.cc35
-rw-r--r--chrome/common/gpu_video_common.h18
-rw-r--r--chrome/gpu/gpu_video_decoder.cc183
-rw-r--r--chrome/gpu/gpu_video_decoder.h83
-rw-r--r--chrome/renderer/gpu_video_decoder_host.cc37
-rw-r--r--chrome/renderer/gpu_video_decoder_host.h1
-rw-r--r--chrome/renderer/media/gles2_video_decode_context.cc8
-rw-r--r--chrome/renderer/media/gles2_video_decode_context.h9
-rw-r--r--chrome/renderer/media/ipc_video_decoder.cc7
-rw-r--r--gpu/command_buffer/service/gles2_cmd_decoder.cc13
-rw-r--r--gpu/command_buffer/service/gles2_cmd_decoder.h7
-rw-r--r--media/video/video_decode_context.h35
13 files changed, 234 insertions, 204 deletions
diff --git a/chrome/common/gpu_messages_internal.h b/chrome/common/gpu_messages_internal.h
index 229e3db..5a4d505 100644
--- a/chrome/common/gpu_messages_internal.h
+++ b/chrome/common/gpu_messages_internal.h
@@ -336,7 +336,7 @@ IPC_BEGIN_MESSAGES(GpuVideoDecoderHost)
GpuVideoDecoderOutputBufferParam)
// Allocate video frames for output of the hardware video decoder.
- IPC_MESSAGE_ROUTED4(GpuVideoDecoderHostMsg_AllocateVideoFrames,
+ IPC_MESSAGE_ROUTED4(GpuVideoDecoderHostMsg_AllocateVideoFrame,
int32, /* Number of video frames to generate */
int32, /* Width of the video frame */
int32, /* Height of the video frame */
diff --git a/chrome/common/gpu_video_common.cc b/chrome/common/gpu_video_common.cc
index 5c2e9257..826ef20 100644
--- a/chrome/common/gpu_video_common.cc
+++ b/chrome/common/gpu_video_common.cc
@@ -86,22 +86,32 @@ void ParamTraits<GpuVideoDecoderInitParam>::Log(
void ParamTraits<GpuVideoDecoderInitDoneParam>::Write(
Message* m, const GpuVideoDecoderInitDoneParam& p) {
WriteParam(m, p.success);
+ WriteParam(m, p.stride);
+ WriteParam(m, p.format);
+ WriteParam(m, p.surface_type);
WriteParam(m, p.input_buffer_size);
+ WriteParam(m, p.output_buffer_size);
WriteParam(m, p.input_buffer_handle);
+ WriteParam(m, p.output_buffer_handle);
}
bool ParamTraits<GpuVideoDecoderInitDoneParam>::Read(
const Message* m, void** iter, GpuVideoDecoderInitDoneParam* r) {
if (!ReadParam(m, iter, &r->success) ||
+ !ReadParam(m, iter, &r->stride) ||
+ !ReadParam(m, iter, &r->format) ||
+ !ReadParam(m, iter, &r->surface_type) ||
!ReadParam(m, iter, &r->input_buffer_size) ||
- !ReadParam(m, iter, &r->input_buffer_handle))
+ !ReadParam(m, iter, &r->output_buffer_size) ||
+ !ReadParam(m, iter, &r->input_buffer_handle) ||
+ !ReadParam(m, iter, &r->output_buffer_handle))
return false;
return true;
}
void ParamTraits<GpuVideoDecoderInitDoneParam>::Log(
const GpuVideoDecoderInitDoneParam& p, std::string* l) {
- l->append(StringPrintf("(%d %d)", p.success, p.input_buffer_size));
+ l->append(StringPrintf("(%d)", p.stride));
}
///////////////////////////////////////////////////////////////////////////////
@@ -133,29 +143,29 @@ void ParamTraits<GpuVideoDecoderInputBufferParam>::Log(
void ParamTraits<GpuVideoDecoderOutputBufferParam>::Write(
Message* m, const GpuVideoDecoderOutputBufferParam& p) {
- WriteParam(m, p.frame_id);
WriteParam(m, p.timestamp);
WriteParam(m, p.duration);
WriteParam(m, p.flags);
+ WriteParam(m, p.texture);
}
bool ParamTraits<GpuVideoDecoderOutputBufferParam>::Read(
const Message* m, void** iter, GpuVideoDecoderOutputBufferParam* r) {
- if (!ReadParam(m, iter, &r->frame_id) ||
- !ReadParam(m, iter, &r->timestamp) ||
+ if (!ReadParam(m, iter, &r->timestamp) ||
!ReadParam(m, iter, &r->duration) ||
- !ReadParam(m, iter, &r->flags))
+ !ReadParam(m, iter, &r->flags) ||
+ !ReadParam(m, iter, &r->texture))
return false;
return true;
}
void ParamTraits<GpuVideoDecoderOutputBufferParam>::Log(
const GpuVideoDecoderOutputBufferParam& p, std::string* l) {
- l->append(StringPrintf("(%d %d %d %x)",
- p.frame_id,
+ l->append(StringPrintf("(%d %d) %x texture = x%d",
static_cast<int>(p.timestamp),
static_cast<int>(p.duration),
- p.flags));
+ p.flags,
+ p.texture));
}
///////////////////////////////////////////////////////////////////////////////
@@ -182,18 +192,21 @@ void ParamTraits<GpuVideoDecoderErrorInfoParam>::Log(
void ParamTraits<GpuVideoDecoderFormatChangeParam>::Write(
Message* m, const GpuVideoDecoderFormatChangeParam& p) {
WriteParam(m, p.input_buffer_size);
+ WriteParam(m, p.output_buffer_size);
}
bool ParamTraits<GpuVideoDecoderFormatChangeParam>::Read(
const Message* m, void** iter, GpuVideoDecoderFormatChangeParam* r) {
- if (!ReadParam(m, iter, &r->input_buffer_size))
+ if (!ReadParam(m, iter, &r->input_buffer_size) ||
+ !ReadParam(m, iter, &r->output_buffer_size))
return false;
return true;
}
void ParamTraits<GpuVideoDecoderFormatChangeParam>::Log(
const GpuVideoDecoderFormatChangeParam& p, std::string* l) {
- l->append(StringPrintf("%d", p.input_buffer_size));
+ l->append(StringPrintf("(%d %d)", p.input_buffer_size,
+ p.output_buffer_size));
}
///////////////////////////////////////////////////////////////////////////////
diff --git a/chrome/common/gpu_video_common.h b/chrome/common/gpu_video_common.h
index 8102f3d..ee3f1e3 100644
--- a/chrome/common/gpu_video_common.h
+++ b/chrome/common/gpu_video_common.h
@@ -48,8 +48,15 @@ struct GpuVideoDecoderInitParam {
struct GpuVideoDecoderInitDoneParam {
int32 success; // other parameter is only meaningful when this is true.
+ int32 provides_buffer;
+ media::VideoFrame::Format format;
+ int32 surface_type; // TODO(hclam): Remove this. We only pass GL textures.
+ int32 stride;
int32 input_buffer_size;
+ int32 output_buffer_size;
base::SharedMemoryHandle input_buffer_handle;
+ // We do not need this if hardware composition is ready.
+ base::SharedMemoryHandle output_buffer_handle;
};
struct GpuVideoDecoderInputBufferParam {
@@ -59,14 +66,16 @@ struct GpuVideoDecoderInputBufferParam {
int32 flags; // miscellaneous flag bit mask
};
-// A message that contains formation of a video frame that is ready to be
-// rendered by the Renderer process.
struct GpuVideoDecoderOutputBufferParam {
- int32 frame_id; // ID of the video frame that is ready to be rendered.
int64 timestamp; // In unit of microseconds.
int64 duration; // In unit of microseconds.
int32 flags; // miscellaneous flag bit mask
+ // TODO(hclam): This is really ugly and should be removed. Instead of sending
+ // a texture id we should send a buffer id that signals that a buffer is ready
+ // to be consumed. Before that we need API to establish the buffers.
+ int32 texture;
+
enum {
kFlagsEndOfStream = 0x00000001,
kFlagsDiscontinuous = 0x00000002,
@@ -79,8 +88,11 @@ struct GpuVideoDecoderErrorInfoParam {
// TODO(jiesun): define this.
struct GpuVideoDecoderFormatChangeParam {
+ int32 stride;
int32 input_buffer_size;
+ int32 output_buffer_size;
base::SharedMemoryHandle input_buffer_handle;
+ base::SharedMemoryHandle output_buffer_handle;
};
namespace IPC {
diff --git a/chrome/gpu/gpu_video_decoder.cc b/chrome/gpu/gpu_video_decoder.cc
index eb1823a..bcdce82 100644
--- a/chrome/gpu/gpu_video_decoder.cc
+++ b/chrome/gpu/gpu_video_decoder.cc
@@ -53,17 +53,63 @@ bool GpuVideoDecoder::CreateInputTransferBuffer(
return true;
}
+bool GpuVideoDecoder::CreateOutputTransferBuffer(
+ uint32 size,
+ base::SharedMemoryHandle* handle) {
+ output_transfer_buffer_.reset(new base::SharedMemory);
+ if (!output_transfer_buffer_.get())
+ return false;
+
+ if (!output_transfer_buffer_->Create(std::wstring(), false, false, size))
+ return false;
+
+ if (!output_transfer_buffer_->Map(size))
+ return false;
+
+ if (!output_transfer_buffer_->ShareToProcess(renderer_handle_, handle))
+ return false;
+
+ return true;
+}
+
+void GpuVideoDecoder::CreateVideoFrameOnTransferBuffer() {
+ const base::TimeDelta kZero;
+ uint8* data[media::VideoFrame::kMaxPlanes];
+ int32 strides[media::VideoFrame::kMaxPlanes];
+ memset(data, 0, sizeof(data));
+ memset(strides, 0, sizeof(strides));
+ data[0] = static_cast<uint8*>(output_transfer_buffer_->memory());
+ data[1] = data[0] + config_.width * config_.height;
+ data[2] = data[1] + config_.width * config_.height / 4;
+ strides[0] = config_.width;
+ strides[1] = strides[2] = config_.width >> 1;
+ media::VideoFrame:: CreateFrameExternal(
+ media::VideoFrame::TYPE_SYSTEM_MEMORY,
+ media::VideoFrame::YV12,
+ config_.width, config_.height, 3,
+ data, strides,
+ kZero, kZero,
+ NULL,
+ &frame_);
+}
+
void GpuVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
info_ = info;
GpuVideoDecoderInitDoneParam param;
param.success = false;
param.input_buffer_handle = base::SharedMemory::NULLHandle();
+ param.output_buffer_handle = base::SharedMemory::NULLHandle();
if (!info.success) {
SendInitializeDone(param);
return;
}
+ // Translate surface type.
+ // TODO(hclam): Remove |surface_type| since we are always passing textures.
+ param.surface_type = static_cast<int>(info.stream_info.surface_type);
+ param.format = info.stream_info.surface_format;
+
// TODO(jiesun): Check the assumption of input size < original size.
param.input_buffer_size = config_.width * config_.height * 3 / 2;
if (!CreateInputTransferBuffer(param.input_buffer_size,
@@ -72,7 +118,31 @@ void GpuVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
return;
}
+ if (info.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) {
+ // TODO(jiesun): Allocate this according to the surface format.
+ // The format actually could change during streaming, we need to
+ // notify GpuVideoDecoderHost side when this happened and renegotiate
+ // the transfer buffer.
+ switch (info.stream_info.surface_format) {
+ case VideoFrame::YV12:
+ // TODO(jiesun): take stride into account.
+ param.output_buffer_size =
+ config_.width * config_.height * 3 / 2;
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ if (!CreateOutputTransferBuffer(param.output_buffer_size,
+ &param.output_buffer_handle)) {
+ SendInitializeDone(param);
+ return;
+ }
+ CreateVideoFrameOnTransferBuffer();
+ }
+
param.success = true;
+
SendInitializeDone(param);
}
@@ -106,16 +176,21 @@ void GpuVideoDecoder::ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
output_param.duration = frame->GetDuration().InMicroseconds();
output_param.flags = frame->IsEndOfStream() ?
GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream : 0;
+ // TODO(hclam): We should have the conversion between VideoFrame and the
+ // IPC transport param done in GpuVideoDevice.
+ // This is a hack to pass texture back as a param.
+ output_param.texture = frame->gl_texture(media::VideoFrame::kRGBPlane);
SendFillBufferDone(output_param);
}
void* GpuVideoDecoder::GetDevice() {
- return video_device_->GetDevice();
+ // Simply delegate the method call to GpuVideoDevice.
+ return decode_context_->GetDevice();
}
void GpuVideoDecoder::AllocateVideoFrames(
- int n, size_t width, size_t height, media::VideoFrame::Format format,
- std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task) {
+ int n, size_t width, size_t height,
+ AllocationCompleteCallback* callback) {
// Since the communication between Renderer and GPU process is by GL textures.
// We need to obtain a set of GL textures by sending IPC commands to the
// Renderer process. The recipient of these commands will be IpcVideoDecoder.
@@ -133,37 +208,20 @@ void GpuVideoDecoder::AllocateVideoFrames(
//
// Note that this method is called when there's no video frames allocated or
// they were all released.
- DCHECK(video_frame_map_.empty());
-
- // Save the parameters for allocation.
- pending_allocation_.reset(new PendingAllocation());
- pending_allocation_->n = n;
- pending_allocation_->width = width;
- pending_allocation_->height = height;
- pending_allocation_->format = format;
- pending_allocation_->frames = frames;
- pending_allocation_->task = task;
- SendAllocateVideoFrames(n, width, height, format);
}
-void GpuVideoDecoder::ReleaseAllVideoFrames() {
+void GpuVideoDecoder::ReleaseVideoFrames(int n, VideoFrame* frames) {
// This method will first call to GpuVideoDevice to release all the resource
// associated with a VideoFrame.
//
- // And then we'll call GpuVideoDevice::ReleaseVideoFrame() to remove the set
+ // And then we'll call GpuVideoDevice::ReleaseVideoFrames to remove the set
// of Gl textures associated with the context.
//
// And finally we'll send IPC commands to IpcVideoDecoder to destroy all
// GL textures generated.
- for (VideoFrameMap::iterator i = video_frame_map_.begin();
- i != video_frame_map_.end(); ++i) {
- video_device_->ReleaseVideoFrame(i->second);
- }
- video_frame_map_.clear();
- SendReleaseAllVideoFrames();
}
-void GpuVideoDecoder::Destroy(Task* task) {
+void GpuVideoDecoder::Destroy(DestructionCompleteCallback* callback) {
// TODO(hclam): I still need to think what I should do here.
}
@@ -173,6 +231,7 @@ GpuVideoDecoder::GpuVideoDecoder(
base::ProcessHandle handle,
gpu::gles2::GLES2Decoder* decoder)
: decoder_host_route_id_(param->decoder_host_route_id),
+ output_transfer_buffer_busy_(false),
pending_output_requests_(0),
channel_(channel),
renderer_handle_(handle),
@@ -201,6 +260,8 @@ void GpuVideoDecoder::OnUninitialize() {
}
void GpuVideoDecoder::OnFlush() {
+ // TODO(jiesun): this is wrong??
+ output_transfer_buffer_busy_ = false;
pending_output_requests_ = 0;
decode_engine_->Flush();
@@ -231,56 +292,42 @@ void GpuVideoDecoder::OnFillThisBuffer(
if (info_.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) {
pending_output_requests_++;
+ if (!output_transfer_buffer_busy_) {
+ output_transfer_buffer_busy_ = true;
+ decode_engine_->ProduceVideoFrame(frame_);
+ }
} else {
+ // TODO(hclam): I need to rethink how to delegate calls to
+ // VideoDecodeEngine, I may need to create a GpuVideoDecodeContext that
+ // provides a method for me to make calls to VideoDecodeEngine with the
+ // correct VideoFrame.
+ DCHECK_EQ(VideoFrame::TYPE_GL_TEXTURE, info_.stream_info.surface_type);
+
+ scoped_refptr<media::VideoFrame> frame;
+ VideoFrame::GlTexture textures[3] = { param.texture, 0, 0 };
+
+ media::VideoFrame:: CreateFrameGlTexture(
+ media::VideoFrame::RGBA, config_.width, config_.height, textures,
+ base::TimeDelta(), base::TimeDelta(), &frame);
+ decode_engine_->ProduceVideoFrame(frame);
}
}
void GpuVideoDecoder::OnFillThisBufferDoneACK() {
if (info_.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) {
+ output_transfer_buffer_busy_ = false;
pending_output_requests_--;
if (pending_output_requests_) {
+ output_transfer_buffer_busy_ = true;
decode_engine_->ProduceVideoFrame(frame_);
}
}
}
-void GpuVideoDecoder::OnVideoFrameAllocated(int32 frame_id,
- std::vector<uint32> textures) {
- // This method is called in response to a video frame allocation request sent
- // to the Renderer process.
- // We should use the textures to generate a VideoFrame by using
- // GpuVideoDevice. The VideoFrame created is added to the internal map.
- // If we have generated enough VideoFrame, we call |allocation_callack_| to
- // complete the allocation process.
- media::VideoFrame::GlTexture gl_textures[media::VideoFrame::kMaxPlanes];
- memset(gl_textures, 0, sizeof(gl_textures));
- for (size_t i = 0; i < textures.size(); ++i) {
- // Translate the client texture id to service texture id.
- bool ret = gles2_decoder_->GetServiceTextureId(textures[i],
- gl_textures + i);
- DCHECK(ret) << "Cannot translate client texture ID to service ID";
- }
-
- scoped_refptr<media::VideoFrame> frame;
- bool ret = video_device_->CreateVideoFrameFromGlTextures(
- pending_allocation_->width, pending_allocation_->height,
- pending_allocation_->format, gl_textures, &frame);
-
- DCHECK(ret) << "Failed to allocation VideoFrame from GL textures)";
- pending_allocation_->frames->push_back(frame);
- video_frame_map_.insert(std::make_pair(frame_id, frame));
-
- if (video_frame_map_.size() == pending_allocation_->n) {
- pending_allocation_->task->Run();
- delete pending_allocation_->task;
- pending_allocation_.reset();
- }
-}
-
void GpuVideoDecoder::SendInitializeDone(
const GpuVideoDecoderInitDoneParam& param) {
if (!channel_->Send(
- new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) {
+ new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) {
LOG(ERROR) << "GpuVideoDecoderMsg_InitializeACK failed";
}
}
@@ -299,14 +346,14 @@ void GpuVideoDecoder::SendFlushDone() {
void GpuVideoDecoder::SendEmptyBufferDone() {
if (!channel_->Send(
- new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) {
+ new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) {
LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferDone failed";
}
}
void GpuVideoDecoder::SendEmptyBufferACK() {
if (!channel_->Send(
- new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) {
+ new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) {
LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferACK failed";
}
}
@@ -314,23 +361,7 @@ void GpuVideoDecoder::SendEmptyBufferACK() {
void GpuVideoDecoder::SendFillBufferDone(
const GpuVideoDecoderOutputBufferParam& param) {
if (!channel_->Send(
- new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), param))) {
+ new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), param))) {
LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBufferDone failed";
}
}
-
-void GpuVideoDecoder::SendAllocateVideoFrames(
- int n, size_t width, size_t height, media::VideoFrame::Format format) {
- if (!channel_->Send(
- new GpuVideoDecoderHostMsg_AllocateVideoFrames(
- route_id(), n, width, height, format))) {
- LOG(ERROR) << "GpuVideoDecoderMsg_AllocateVideoFrames failed";
- }
-}
-
-void GpuVideoDecoder::SendReleaseAllVideoFrames() {
- if (!channel_->Send(
- new GpuVideoDecoderHostMsg_ReleaseAllVideoFrames(route_id()))) {
- LOG(ERROR) << "GpuVideoDecoderMsg_ReleaseAllVideoFrames failed";
- }
-}
diff --git a/chrome/gpu/gpu_video_decoder.h b/chrome/gpu/gpu_video_decoder.h
index 9f77700..7aabffa 100644
--- a/chrome/gpu/gpu_video_decoder.h
+++ b/chrome/gpu/gpu_video_decoder.h
@@ -5,9 +5,6 @@
#ifndef CHROME_GPU_GPU_VIDEO_DECODER_H_
#define CHROME_GPU_GPU_VIDEO_DECODER_H_
-#include <map>
-#include <vector>
-
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/ref_counted.h"
@@ -78,8 +75,8 @@ class GpuChannel;
// will maintain such mapping.
//
class GpuVideoDecoder
- : public base::RefCountedThreadSafe<GpuVideoDecoder>,
- public IPC::Channel::Listener,
+ : public IPC::Channel::Listener,
+ public base::RefCountedThreadSafe<GpuVideoDecoder>,
public media::VideoDecodeEngine::EventHandler,
public media::VideoDecodeContext {
@@ -101,11 +98,10 @@ class GpuVideoDecoder
// VideoDecodeContext implementation.
virtual void* GetDevice();
- virtual void AllocateVideoFrames(
- int n, size_t width, size_t height, media::VideoFrame::Format format,
- std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task);
- virtual void ReleaseAllVideoFrames();
- virtual void Destroy(Task* task);
+ virtual void AllocateVideoFrames(int n, size_t width, size_t height,
+ AllocationCompleteCallback* callback);
+ virtual void ReleaseVideoFrames(int n, VideoFrame* frames);
+ virtual void Destroy(DestructionCompleteCallback* callback);
// Constructor and destructor.
GpuVideoDecoder(const GpuVideoDecoderInfoParam* param,
@@ -115,45 +111,19 @@ class GpuVideoDecoder
virtual ~GpuVideoDecoder() {}
private:
- struct PendingAllocation {
- size_t n;
- size_t width;
- size_t height;
- media::VideoFrame::Format format;
- std::vector<scoped_refptr<media::VideoFrame> >* frames;
- Task* task;
- };
-
int32 route_id() { return decoder_host_route_id_; }
bool CreateInputTransferBuffer(uint32 size,
base::SharedMemoryHandle* handle);
-
- // These methods are message handlers for the messages sent from the Renderer
- // process.
- void OnInitialize(const GpuVideoDecoderInitParam& param);
- void OnUninitialize();
- void OnFlush();
- void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer);
- void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& param);
- void OnFillThisBufferDoneACK();
- void OnVideoFrameAllocated(int32 frame_id, std::vector<uint32> textures);
-
- // Helper methods for sending messages to the Renderer process.
- void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param);
- void SendUninitializeDone();
- void SendFlushDone();
- void SendEmptyBufferDone();
- void SendEmptyBufferACK();
- void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& param);
- void SendAllocateVideoFrames(
- int n, size_t width, size_t height, media::VideoFrame::Format format);
- void SendReleaseAllVideoFrames();
+ bool CreateOutputTransferBuffer(uint32 size,
+ base::SharedMemoryHandle* handle);
+ void CreateVideoFrameOnTransferBuffer();
int32 decoder_host_route_id_;
// Used only in system memory path. i.e. Remove this later.
scoped_refptr<VideoFrame> frame_;
+ bool output_transfer_buffer_busy_;
int32 pending_output_requests_;
GpuChannel* channel_;
@@ -163,27 +133,30 @@ class GpuVideoDecoder
// is used to switch context and translate client texture ID to service ID.
gpu::gles2::GLES2Decoder* gles2_decoder_;
- // Memory for transfering the input data for the hardware video decoder.
scoped_ptr<base::SharedMemory> input_transfer_buffer_;
+ scoped_ptr<base::SharedMemory> output_transfer_buffer_;
- // VideoDecodeEngine is used to do the actual video decoding.
scoped_ptr<media::VideoDecodeEngine> decode_engine_;
-
- // GpuVideoDevice is used to generate VideoFrame(s) from GL textures. The
- // frames generated are understood by the decode engine.
- scoped_ptr<GpuVideoDevice> video_device_;
-
- // Contain information for allocation VideoFrame(s).
- scoped_ptr<PendingAllocation> pending_allocation_;
-
- // Contains the mapping between a |frame_id| and VideoFrame generated by
- // GpuVideoDevice from the associated GL textures.
- typedef std::map<int32, scoped_refptr<media::VideoFrame> > VideoFrameMap;
- VideoFrameMap video_frame_map_;
-
+ scoped_ptr<GpuVideoDevice> decode_context_;
media::VideoCodecConfig config_;
media::VideoCodecInfo info_;
+ // Input message handler.
+ void OnInitialize(const GpuVideoDecoderInitParam& param);
+ void OnUninitialize();
+ void OnFlush();
+ void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer);
+ void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& param);
+ void OnFillThisBufferDoneACK();
+
+ // Output message helper.
+ void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param);
+ void SendUninitializeDone();
+ void SendFlushDone();
+ void SendEmptyBufferDone();
+ void SendEmptyBufferACK();
+ void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& param);
+
DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoder);
};
diff --git a/chrome/renderer/gpu_video_decoder_host.cc b/chrome/renderer/gpu_video_decoder_host.cc
index 771c38a..a585ede 100644
--- a/chrome/renderer/gpu_video_decoder_host.cc
+++ b/chrome/renderer/gpu_video_decoder_host.cc
@@ -109,9 +109,12 @@ void GpuVideoDecoderHost::FillThisBuffer(scoped_refptr<VideoFrame> frame) {
// TODO(hclam): We should keep an IDMap to convert between a frame a buffer
// ID so that we can signal GpuVideoDecoder in GPU process to use the buffer.
// This eliminates one conversion step.
- // TODO(hclam): Fill the param.
GpuVideoDecoderOutputBufferParam param;
+ // TODO(hclam): This is a hack to pass the texture id to the hardware video
+ // decoder. We should have created a mapping between VideoFrame and buffer id
+ // and we pass the buffer id to the GPU process.
+ param.texture = frame->gl_texture(VideoFrame::kRGBPlane);
if (!channel_host_ || !channel_host_->Send(
new GpuVideoDecoderMsg_FillThisBuffer(route_id(), param))) {
LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBuffer failed";
@@ -149,6 +152,13 @@ void GpuVideoDecoderHost::OnInitializeDone(
if (!input_transfer_buffer_->Map(param.input_buffer_size))
break;
+ if (!base::SharedMemory::IsHandleValid(param.output_buffer_handle))
+ break;
+ output_transfer_buffer_.reset(
+ new base::SharedMemory(param.output_buffer_handle, false));
+ if (!output_transfer_buffer_->Map(param.output_buffer_size))
+ break;
+
success = true;
} while (0);
@@ -158,6 +168,7 @@ void GpuVideoDecoderHost::OnInitializeDone(
void GpuVideoDecoderHost::OnUninitializeDone() {
input_transfer_buffer_.reset();
+ output_transfer_buffer_.reset();
event_handler_->OnUninitializeDone();
}
@@ -178,11 +189,27 @@ void GpuVideoDecoderHost::OnFillThisBufferDone(
if (param.flags & GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream) {
VideoFrame::CreateEmptyFrame(&frame);
- } else {
+ } else if (done_param_.surface_type ==
+ media::VideoFrame::TYPE_SYSTEM_MEMORY) {
+ VideoFrame::CreateFrame(VideoFrame::YV12,
+ init_param_.width,
+ init_param_.height,
+ base::TimeDelta::FromMicroseconds(param.timestamp),
+ base::TimeDelta::FromMicroseconds(param.duration),
+ &frame);
+ uint8* src = static_cast<uint8*>(output_transfer_buffer_->memory());
+ uint8* data0 = frame->data(0);
+ uint8* data1 = frame->data(1);
+ uint8* data2 = frame->data(2);
+ int32 size = init_param_.width * init_param_.height;
+ memcpy(data0, src, size);
+ memcpy(data1, src + size, size / 4);
+ memcpy(data2, src + size + size / 4, size / 4);
+ } else if (done_param_.surface_type == media::VideoFrame::TYPE_GL_TEXTURE) {
// TODO(hclam): The logic in buffer allocation is pretty much around
- // using shared memory for output buffer which needs to be adjusted.
- // Fake the texture ID until we implement it properly.
- VideoFrame::GlTexture textures[3] = { 0, 0, 0 };
+ // using shared memory for output buffer which needs to be adjusted. For
+ // now we have to add this hack to get the texture id.
+ VideoFrame::GlTexture textures[3] = { param.texture, 0, 0 };
media::VideoFrame::CreateFrameGlTexture(
media::VideoFrame::RGBA, init_param_.width, init_param_.height,
textures,
diff --git a/chrome/renderer/gpu_video_decoder_host.h b/chrome/renderer/gpu_video_decoder_host.h
index 1255bf5..02f8fc8 100644
--- a/chrome/renderer/gpu_video_decoder_host.h
+++ b/chrome/renderer/gpu_video_decoder_host.h
@@ -112,6 +112,7 @@ class GpuVideoDecoderHost
// Transfer buffers for both input and output.
// TODO(jiesun): remove output buffer when hardware composition is ready.
scoped_ptr<base::SharedMemory> input_transfer_buffer_;
+ scoped_ptr<base::SharedMemory> output_transfer_buffer_;
DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoderHost);
};
diff --git a/chrome/renderer/media/gles2_video_decode_context.cc b/chrome/renderer/media/gles2_video_decode_context.cc
index 2c04282..075f80a 100644
--- a/chrome/renderer/media/gles2_video_decode_context.cc
+++ b/chrome/renderer/media/gles2_video_decode_context.cc
@@ -20,15 +20,15 @@ void* Gles2VideoDecodeContext::GetDevice() {
}
void Gles2VideoDecodeContext::AllocateVideoFrames(
- int n, size_t width, size_t height, media::VideoFrame::Format format,
- std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task) {
+ int n, size_t width, size_t height, AllocationCompleteCallback* callback) {
// TODO(hclam): Implement.
}
-void Gles2VideoDecodeContext::ReleaseAllVideoFrames() {
+void Gles2VideoDecodeContext::ReleaseVideoFrames(int n,
+ media::VideoFrame* frames) {
// TODO(hclam): Implement.
}
-void Gles2VideoDecodeContext::Destroy(Task* task) {
+void Gles2VideoDecodeContext::Destroy(DestructionCompleteCallback* callback) {
// TODO(hclam): Implement.
}
diff --git a/chrome/renderer/media/gles2_video_decode_context.h b/chrome/renderer/media/gles2_video_decode_context.h
index 35958b6..e087bb3 100644
--- a/chrome/renderer/media/gles2_video_decode_context.h
+++ b/chrome/renderer/media/gles2_video_decode_context.h
@@ -95,11 +95,10 @@ class Gles2VideoDecodeContext : public media::VideoDecodeContext {
// media::VideoDecodeContext implementation.
virtual void* GetDevice();
- virtual void AllocateVideoFrames(
- int n, size_t width, size_t height, media::VideoFrame::Format format,
- std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task);
- virtual void ReleaseAllVideoFrames();
- virtual void Destroy(Task* task) = 0;
+ virtual void AllocateVideoFrames(int n, size_t width, size_t height,
+ AllocationCompleteCallback* callback);
+ virtual void ReleaseVideoFrames(int n, media::VideoFrame* frames);
+ virtual void Destroy(DestructionCompleteCallback* callback);
//--------------------------------------------------------------------------
// Any thread
diff --git a/chrome/renderer/media/ipc_video_decoder.cc b/chrome/renderer/media/ipc_video_decoder.cc
index eda8696..0b6896b 100644
--- a/chrome/renderer/media/ipc_video_decoder.cc
+++ b/chrome/renderer/media/ipc_video_decoder.cc
@@ -111,9 +111,10 @@ void IpcVideoDecoder::OnInitializeDone(
media::mime_type::kUncompressedVideo);
media_format_.SetAsInteger(media::MediaFormat::kWidth, width_);
media_format_.SetAsInteger(media::MediaFormat::kHeight, height_);
- media_format_.SetAsInteger(
- media::MediaFormat::kSurfaceType,
- static_cast<int>(media::VideoFrame::TYPE_GL_TEXTURE));
+ media_format_.SetAsInteger(media::MediaFormat::kSurfaceType,
+ static_cast<int>(param.surface_type));
+ media_format_.SetAsInteger(media::MediaFormat::kSurfaceFormat,
+ static_cast<int>(param.format));
state_ = kPlaying;
} else {
LOG(ERROR) << "IpcVideoDecoder initialization failed!";
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.cc b/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 5b9fc40..183fb03 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -550,8 +550,6 @@ class GLES2DecoderImpl : public base::SupportsWeakPtr<GLES2DecoderImpl>,
virtual gfx::GLContext* GetGLContext() { return context_.get(); }
virtual void SetSwapBuffersCallback(Callback0::Type* callback);
- virtual bool GetServiceTextureId(uint32 client_texture_id,
- uint32* service_texture_id);
// Restores the current state to the user's settings.
void RestoreCurrentFramebufferBindings();
@@ -2077,17 +2075,6 @@ void GLES2DecoderImpl::SetSwapBuffersCallback(Callback0::Type* callback) {
swap_buffers_callback_.reset(callback);
}
-bool GLES2DecoderImpl::GetServiceTextureId(uint32 client_texture_id,
- uint32* service_texture_id) {
- TextureManager::TextureInfo* texture =
- texture_manager()->GetTextureInfo(client_texture_id);
- if (texture) {
- *service_texture_id = texture->service_id();
- return true;
- }
- return false;
-}
-
void GLES2DecoderImpl::Destroy() {
if (vertex_compiler_ != NULL) {
ShDestruct(vertex_compiler_);
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.h b/gpu/command_buffer/service/gles2_cmd_decoder.h
index 694fa37..e94df94 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -82,13 +82,6 @@ class GLES2Decoder : public CommonDecoder {
// Sets a callback which is called when a SwapBuffers command is processed.
virtual void SetSwapBuffersCallback(Callback0::Type* callback) = 0;
- // Get the service texture ID corresponding to a client texture ID.
- // If no such record is found then return false.
- virtual bool GetServiceTextureId(uint32 client_texture_id,
- uint32* service_texture_id) {
- return false;
- }
-
protected:
explicit GLES2Decoder(ContextGroup* group);
diff --git a/media/video/video_decode_context.h b/media/video/video_decode_context.h
index 795b136..0bea382 100644
--- a/media/video/video_decode_context.h
+++ b/media/video/video_decode_context.h
@@ -5,23 +5,20 @@
#ifndef MEDIA_VIDEO_VIDEO_DECODE_CONTEXT_H_
#define MEDIA_VIDEO_VIDEO_DECODE_CONTEXT_H_
-#include <vector>
-
-#include "base/task.h"
-#include "media/base/video_frame.h"
+#include "base/callback.h"
namespace media {
class VideoFrame;
-// A VideoDecodeContext is used by VideoDecodeEngine to provide the following
-// functions:
-//
-// 1. Provides access to hardware video decoding device.
-// 2. Allocate VideoFrame objects that are used to carry the decoded video
-// frames.
+// A VideoDecodeContext provides resources like output video frame storage and
+// hardware decoder handle to a VideoDecodeEngine, it hides all the platform and
+// subsystem details from the decode engine.
class VideoDecodeContext {
public:
+ typedef Callback2<int, VideoFrame*[]>::Type AllocationCompleteCallback;
+ typedef Callback0::Type DestructionCompleteCallback;
+
virtual ~VideoDecodeContext() {};
// Obtain a handle to the hardware video decoder device. The type of the
@@ -31,26 +28,22 @@ class VideoDecodeContext {
// If a hardware device is not needed this method should return NULL.
virtual void* GetDevice() = 0;
- // Allocate |n| video frames with dimension |width| and |height|. |task|
+ // Allocate |n| video frames with dimension |width| and |height|. |callback|
// is called when allocation has completed.
- //
- // |frames| is the output parameter for VideFrame(s) allocated.
- virtual void AllocateVideoFrames(
- int n, size_t width, size_t height, VideoFrame::Format format,
- std::vector<scoped_refptr<VideoFrame> >* frames,
- Task* task) = 0;
+ virtual void AllocateVideoFrames(int n, size_t width, size_t height,
+ AllocationCompleteCallback* callback) = 0;
- // Release all video frames allocated by the context. After making this call
+ // Release video frames allocated by the context. After making this call
// VideoDecodeEngine should not use the VideoFrame allocated because they
// could be destroyed.
- virtual void ReleaseAllVideoFrames() = 0;
+ virtual void ReleaseVideoFrames(int n, VideoFrame* frames) = 0;
- // Destroy this context asynchronously. When the operation is done |task|
+ // Destroy this context asynchronously. When the operation is done |callback|
// is called.
//
// ReleaseVideoFrames() need to be called with all the video frames allocated
// before making this call.
- virtual void Destroy(Task* task) = 0;
+ virtual void Destroy(DestructionCompleteCallback* callback) = 0;
};
} // namespace media