author    hclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-09-17 22:03:16 +0000
committer hclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-09-17 22:03:16 +0000
commit    1318e92f70e240d7ae71320ea7e4fcae18f2ce3e (patch)
tree      412ee9192a850d9867bea3a841d800f67081ebe3
parent    9fcd39385ae39a68d3509238bd9ef83af1868fc7 (diff)
Resubmit GpuVideoDecoder and related patches.
BUG=53714
TEST=Tree is green
Review URL: http://codereview.chromium.org/3442006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@59860 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r-- chrome/chrome.gyp | 3
-rw-r--r-- chrome/common/gpu_messages_internal.h | 2
-rw-r--r-- chrome/common/gpu_video_common.cc | 35
-rw-r--r-- chrome/common/gpu_video_common.h | 18
-rw-r--r-- chrome/gpu/gpu_video_decoder.cc | 211
-rw-r--r-- chrome/gpu/gpu_video_decoder.h | 104
-rw-r--r-- chrome/gpu/media/fake_gl_video_decode_engine.cc | 50
-rw-r--r-- chrome/gpu/media/fake_gl_video_decode_engine.h | 20
-rw-r--r-- chrome/gpu/media/fake_gl_video_device.cc | 60
-rw-r--r-- chrome/gpu/media/fake_gl_video_device.h | 27
-rw-r--r-- chrome/gpu/media/gpu_video_device.h | 17
-rw-r--r-- chrome/renderer/gpu_video_decoder_host.cc | 37
-rw-r--r-- chrome/renderer/gpu_video_decoder_host.h | 1
-rw-r--r-- chrome/renderer/media/gles2_video_decode_context.cc | 13
-rw-r--r-- chrome/renderer/media/gles2_video_decode_context.h | 12
-rw-r--r-- chrome/renderer/media/ipc_video_decoder.cc | 7
-rw-r--r-- gpu/command_buffer/service/gles2_cmd_decoder.cc | 13
-rw-r--r-- gpu/command_buffer/service/gles2_cmd_decoder.h | 7
-rw-r--r-- media/filters/ffmpeg_video_decoder.cc | 2
-rw-r--r-- media/filters/ffmpeg_video_decoder_unittest.cc | 8
-rw-r--r-- media/filters/omx_video_decoder.cc | 2
-rw-r--r-- media/mf/mft_h264_decoder.cc | 1
-rw-r--r-- media/mf/mft_h264_decoder.h | 3
-rw-r--r-- media/mf/mft_h264_decoder_example.cc | 6
-rw-r--r-- media/mf/test/mft_h264_decoder_unittest.cc | 22
-rw-r--r-- media/tools/omx_test/omx_test.cc | 2
-rw-r--r-- media/video/ffmpeg_video_decode_engine.cc | 1
-rw-r--r-- media/video/ffmpeg_video_decode_engine.h | 1
-rw-r--r-- media/video/ffmpeg_video_decode_engine_unittest.cc | 8
-rw-r--r-- media/video/omx_video_decode_engine.cc | 1
-rw-r--r-- media/video/omx_video_decode_engine.h | 1
-rw-r--r-- media/video/video_decode_context.h | 63
-rw-r--r-- media/video/video_decode_engine.h | 17
33 files changed, 484 insertions(+), 291 deletions(-)
diff --git a/chrome/chrome.gyp b/chrome/chrome.gyp
index d561e3a..7554cb7 100644
--- a/chrome/chrome.gyp
+++ b/chrome/chrome.gyp
@@ -693,6 +693,7 @@
'../app/app.gyp:app_base',
'../base/base.gyp:base',
'common',
+ '../media/media.gyp:media',
'../skia/skia.gyp:skia',
],
'sources': [
@@ -721,6 +722,8 @@
'gpu/media/gpu_video_device.h',
'gpu/media/fake_gl_video_decode_engine.cc',
'gpu/media/fake_gl_video_decode_engine.h',
+ 'gpu/media/fake_gl_video_device.cc',
+ 'gpu/media/fake_gl_video_device.h',
],
'include_dirs': [
'..',
diff --git a/chrome/common/gpu_messages_internal.h b/chrome/common/gpu_messages_internal.h
index 5a4d505..229e3db 100644
--- a/chrome/common/gpu_messages_internal.h
+++ b/chrome/common/gpu_messages_internal.h
@@ -336,7 +336,7 @@ IPC_BEGIN_MESSAGES(GpuVideoDecoderHost)
GpuVideoDecoderOutputBufferParam)
// Allocate video frames for output of the hardware video decoder.
- IPC_MESSAGE_ROUTED4(GpuVideoDecoderHostMsg_AllocateVideoFrame,
+ IPC_MESSAGE_ROUTED4(GpuVideoDecoderHostMsg_AllocateVideoFrames,
int32, /* Number of video frames to generate */
int32, /* Width of the video frame */
int32, /* Height of the video frame */
diff --git a/chrome/common/gpu_video_common.cc b/chrome/common/gpu_video_common.cc
index 826ef20..5c2e9257 100644
--- a/chrome/common/gpu_video_common.cc
+++ b/chrome/common/gpu_video_common.cc
@@ -86,32 +86,22 @@ void ParamTraits<GpuVideoDecoderInitParam>::Log(
void ParamTraits<GpuVideoDecoderInitDoneParam>::Write(
Message* m, const GpuVideoDecoderInitDoneParam& p) {
WriteParam(m, p.success);
- WriteParam(m, p.stride);
- WriteParam(m, p.format);
- WriteParam(m, p.surface_type);
WriteParam(m, p.input_buffer_size);
- WriteParam(m, p.output_buffer_size);
WriteParam(m, p.input_buffer_handle);
- WriteParam(m, p.output_buffer_handle);
}
bool ParamTraits<GpuVideoDecoderInitDoneParam>::Read(
const Message* m, void** iter, GpuVideoDecoderInitDoneParam* r) {
if (!ReadParam(m, iter, &r->success) ||
- !ReadParam(m, iter, &r->stride) ||
- !ReadParam(m, iter, &r->format) ||
- !ReadParam(m, iter, &r->surface_type) ||
!ReadParam(m, iter, &r->input_buffer_size) ||
- !ReadParam(m, iter, &r->output_buffer_size) ||
- !ReadParam(m, iter, &r->input_buffer_handle) ||
- !ReadParam(m, iter, &r->output_buffer_handle))
+ !ReadParam(m, iter, &r->input_buffer_handle))
return false;
return true;
}
void ParamTraits<GpuVideoDecoderInitDoneParam>::Log(
const GpuVideoDecoderInitDoneParam& p, std::string* l) {
- l->append(StringPrintf("(%d)", p.stride));
+ l->append(StringPrintf("(%d %d)", p.success, p.input_buffer_size));
}
///////////////////////////////////////////////////////////////////////////////
@@ -143,29 +133,29 @@ void ParamTraits<GpuVideoDecoderInputBufferParam>::Log(
void ParamTraits<GpuVideoDecoderOutputBufferParam>::Write(
Message* m, const GpuVideoDecoderOutputBufferParam& p) {
+ WriteParam(m, p.frame_id);
WriteParam(m, p.timestamp);
WriteParam(m, p.duration);
WriteParam(m, p.flags);
- WriteParam(m, p.texture);
}
bool ParamTraits<GpuVideoDecoderOutputBufferParam>::Read(
const Message* m, void** iter, GpuVideoDecoderOutputBufferParam* r) {
- if (!ReadParam(m, iter, &r->timestamp) ||
+ if (!ReadParam(m, iter, &r->frame_id) ||
+ !ReadParam(m, iter, &r->timestamp) ||
!ReadParam(m, iter, &r->duration) ||
- !ReadParam(m, iter, &r->flags) ||
- !ReadParam(m, iter, &r->texture))
+ !ReadParam(m, iter, &r->flags))
return false;
return true;
}
void ParamTraits<GpuVideoDecoderOutputBufferParam>::Log(
const GpuVideoDecoderOutputBufferParam& p, std::string* l) {
- l->append(StringPrintf("(%d %d) %x texture = x%d",
+ l->append(StringPrintf("(%d %d %d %x)",
+ p.frame_id,
static_cast<int>(p.timestamp),
static_cast<int>(p.duration),
- p.flags,
- p.texture));
+ p.flags));
}
///////////////////////////////////////////////////////////////////////////////
@@ -192,21 +182,18 @@ void ParamTraits<GpuVideoDecoderErrorInfoParam>::Log(
void ParamTraits<GpuVideoDecoderFormatChangeParam>::Write(
Message* m, const GpuVideoDecoderFormatChangeParam& p) {
WriteParam(m, p.input_buffer_size);
- WriteParam(m, p.output_buffer_size);
}
bool ParamTraits<GpuVideoDecoderFormatChangeParam>::Read(
const Message* m, void** iter, GpuVideoDecoderFormatChangeParam* r) {
- if (!ReadParam(m, iter, &r->input_buffer_size) ||
- !ReadParam(m, iter, &r->output_buffer_size))
+ if (!ReadParam(m, iter, &r->input_buffer_size))
return false;
return true;
}
void ParamTraits<GpuVideoDecoderFormatChangeParam>::Log(
const GpuVideoDecoderFormatChangeParam& p, std::string* l) {
- l->append(StringPrintf("(%d %d)", p.input_buffer_size,
- p.output_buffer_size));
+ l->append(StringPrintf("%d", p.input_buffer_size));
}
///////////////////////////////////////////////////////////////////////////////
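
The ParamTraits specializations above must keep Write(), Read() and Log() in lockstep: the patch removes |stride|, |format|, |surface_type| and the output-buffer fields from all of them at once, because Read() consumes fields in exactly the order Write() emitted them. Below is a minimal standalone sketch of that symmetry, with a plain byte vector standing in for IPC::Message; all names are illustrative, not the Chromium IPC API.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Stands in for GpuVideoDecoderOutputBufferParam after this patch:
// |texture| is gone and |frame_id| is added.
struct OutputBufferParam {
  int32_t frame_id;
  int64_t timestamp;
  int64_t duration;
  int32_t flags;
};

template <typename T>
void WriteParam(std::vector<char>* m, const T& v) {
  const char* p = reinterpret_cast<const char*>(&v);
  m->insert(m->end(), p, p + sizeof(T));
}

template <typename T>
bool ReadParam(const std::vector<char>& m, size_t* iter, T* v) {
  if (*iter + sizeof(T) > m.size()) return false;  // truncated message
  std::memcpy(v, m.data() + *iter, sizeof(T));
  *iter += sizeof(T);
  return true;
}

// Write and Read enumerate identical fields in identical order; removing a
// field from one side only would silently shift every subsequent read.
void Write(std::vector<char>* m, const OutputBufferParam& p) {
  WriteParam(m, p.frame_id);
  WriteParam(m, p.timestamp);
  WriteParam(m, p.duration);
  WriteParam(m, p.flags);
}

bool Read(const std::vector<char>& m, size_t* iter, OutputBufferParam* r) {
  return ReadParam(m, iter, &r->frame_id) &&
         ReadParam(m, iter, &r->timestamp) &&
         ReadParam(m, iter, &r->duration) &&
         ReadParam(m, iter, &r->flags);
}

int main() {
  OutputBufferParam in = {7, 40000, 33366, 0};
  OutputBufferParam out = {};
  std::vector<char> msg;
  Write(&msg, in);
  size_t iter = 0;
  assert(Read(msg, &iter, &out) && out.frame_id == 7 && out.flags == 0);
}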
diff --git a/chrome/common/gpu_video_common.h b/chrome/common/gpu_video_common.h
index ee3f1e3..8102f3d 100644
--- a/chrome/common/gpu_video_common.h
+++ b/chrome/common/gpu_video_common.h
@@ -48,15 +48,8 @@ struct GpuVideoDecoderInitParam {
struct GpuVideoDecoderInitDoneParam {
int32 success; // Other parameters are only meaningful when this is true.
- int32 provides_buffer;
- media::VideoFrame::Format format;
- int32 surface_type; // TODO(hclam): Remove this. We only pass GL textures.
- int32 stride;
int32 input_buffer_size;
- int32 output_buffer_size;
base::SharedMemoryHandle input_buffer_handle;
- // we do not need this if hardware composition is ready.
- base::SharedMemoryHandle output_buffer_handle;
};
struct GpuVideoDecoderInputBufferParam {
@@ -66,16 +59,14 @@ struct GpuVideoDecoderInputBufferParam {
int32 flags; // miscellaneous flag bit mask
};
+// A message that contains information about a video frame that is ready to be
+// rendered by the Renderer process.
struct GpuVideoDecoderOutputBufferParam {
+ int32 frame_id; // ID of the video frame that is ready to be rendered.
int64 timestamp; // In unit of microseconds.
int64 duration; // In unit of microseconds.
int32 flags; // miscellaneous flag bit mask
- // TODO(hclam): This is really ugly and should be removed. Instead of sending
- // a texture id we should send a buffer id that signals that a buffer is ready
- // to be consumed. Before that we need API to establish the buffers.
- int32 texture;
-
enum {
kFlagsEndOfStream = 0x00000001,
kFlagsDiscontinuous = 0x00000002,
@@ -88,11 +79,8 @@ struct GpuVideoDecoderErrorInfoParam {
// TODO(jiesun): define this.
struct GpuVideoDecoderFormatChangeParam {
- int32 stride;
int32 input_buffer_size;
- int32 output_buffer_size;
base::SharedMemoryHandle input_buffer_handle;
- base::SharedMemoryHandle output_buffer_handle;
};
namespace IPC {
diff --git a/chrome/gpu/gpu_video_decoder.cc b/chrome/gpu/gpu_video_decoder.cc
index bcdce82..bf5881ed 100644
--- a/chrome/gpu/gpu_video_decoder.cc
+++ b/chrome/gpu/gpu_video_decoder.cc
@@ -4,9 +4,11 @@
#include "chrome/gpu/gpu_video_decoder.h"
+#include "chrome/common/child_thread.h"
#include "chrome/common/gpu_messages.h"
#include "chrome/gpu/gpu_channel.h"
#include "chrome/gpu/media/fake_gl_video_decode_engine.h"
+#include "chrome/gpu/media/fake_gl_video_device.h"
#include "media/base/data_buffer.h"
#include "media/base/video_frame.h"
@@ -53,63 +55,17 @@ bool GpuVideoDecoder::CreateInputTransferBuffer(
return true;
}
-bool GpuVideoDecoder::CreateOutputTransferBuffer(
- uint32 size,
- base::SharedMemoryHandle* handle) {
- output_transfer_buffer_.reset(new base::SharedMemory);
- if (!output_transfer_buffer_.get())
- return false;
-
- if (!output_transfer_buffer_->Create(std::wstring(), false, false, size))
- return false;
-
- if (!output_transfer_buffer_->Map(size))
- return false;
-
- if (!output_transfer_buffer_->ShareToProcess(renderer_handle_, handle))
- return false;
-
- return true;
-}
-
-void GpuVideoDecoder::CreateVideoFrameOnTransferBuffer() {
- const base::TimeDelta kZero;
- uint8* data[media::VideoFrame::kMaxPlanes];
- int32 strides[media::VideoFrame::kMaxPlanes];
- memset(data, 0, sizeof(data));
- memset(strides, 0, sizeof(strides));
- data[0] = static_cast<uint8*>(output_transfer_buffer_->memory());
- data[1] = data[0] + config_.width * config_.height;
- data[2] = data[1] + config_.width * config_.height / 4;
- strides[0] = config_.width;
- strides[1] = strides[2] = config_.width >> 1;
- media::VideoFrame:: CreateFrameExternal(
- media::VideoFrame::TYPE_SYSTEM_MEMORY,
- media::VideoFrame::YV12,
- config_.width, config_.height, 3,
- data, strides,
- kZero, kZero,
- NULL,
- &frame_);
-}
-
void GpuVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
info_ = info;
GpuVideoDecoderInitDoneParam param;
param.success = false;
param.input_buffer_handle = base::SharedMemory::NULLHandle();
- param.output_buffer_handle = base::SharedMemory::NULLHandle();
if (!info.success) {
SendInitializeDone(param);
return;
}
- // Translate surface type.
- // TODO(hclam): Remove |surface_type| since we are always passing textures.
- param.surface_type = static_cast<int>(info.stream_info.surface_type);
- param.format = info.stream_info.surface_format;
-
// TODO(jiesun): Check the assumption of input size < original size.
param.input_buffer_size = config_.width * config_.height * 3 / 2;
if (!CreateInputTransferBuffer(param.input_buffer_size,
@@ -118,31 +74,7 @@ void GpuVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
return;
}
- if (info.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) {
- // TODO(jiesun): Allocate this according to the surface format.
- // The format actually could change during streaming, we need to
- // notify GpuVideoDecoderHost side when this happened and renegotiate
- // the transfer buffer.
- switch (info.stream_info.surface_format) {
- case VideoFrame::YV12:
- // TODO(jiesun): take stride into account.
- param.output_buffer_size =
- config_.width * config_.height * 3 / 2;
- break;
- default:
- NOTREACHED();
- }
-
- if (!CreateOutputTransferBuffer(param.output_buffer_size,
- &param.output_buffer_handle)) {
- SendInitializeDone(param);
- return;
- }
- CreateVideoFrameOnTransferBuffer();
- }
-
param.success = true;
-
SendInitializeDone(param);
}
@@ -176,21 +108,20 @@ void GpuVideoDecoder::ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
output_param.duration = frame->GetDuration().InMicroseconds();
output_param.flags = frame->IsEndOfStream() ?
GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream : 0;
- // TODO(hclam): We should have the conversion between VideoFrame and the
- // IPC transport param done in GpuVideoDevice.
- // This is a hack to pass texture back as a param.
- output_param.texture = frame->gl_texture(media::VideoFrame::kRGBPlane);
SendFillBufferDone(output_param);
}
void* GpuVideoDecoder::GetDevice() {
+ bool ret = gles2_decoder_->MakeCurrent();
+ DCHECK(ret) << "Failed to switch context";
+
// Simply delegate the method call to GpuVideoDevice.
- return decode_context_->GetDevice();
+ return video_device_->GetDevice();
}
void GpuVideoDecoder::AllocateVideoFrames(
- int n, size_t width, size_t height,
- AllocationCompleteCallback* callback) {
+ int n, size_t width, size_t height, media::VideoFrame::Format format,
+ std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task) {
// Since the communication between the Renderer and GPU process is done via
// GL textures, we need to obtain a set of GL textures by sending IPC commands
// to the Renderer process. The recipient of these commands will be
// IpcVideoDecoder.
@@ -208,20 +139,56 @@ void GpuVideoDecoder::AllocateVideoFrames(
//
// Note that this method is called when there's no video frames allocated or
// they were all released.
+ DCHECK(video_frame_map_.empty());
+
+ // Save the parameters for allocation.
+ pending_allocation_.reset(new PendingAllocation());
+ pending_allocation_->n = n;
+ pending_allocation_->width = width;
+ pending_allocation_->height = height;
+ pending_allocation_->format = format;
+ pending_allocation_->frames = frames;
+ pending_allocation_->task = task;
+ SendAllocateVideoFrames(n, width, height, format);
}
-void GpuVideoDecoder::ReleaseVideoFrames(int n, VideoFrame* frames) {
+void GpuVideoDecoder::ReleaseAllVideoFrames() {
// This method will first call to GpuVideoDevice to release all the resource
// associated with a VideoFrame.
//
- // And when we'll call GpuVideoDevice::ReleaseVideoFrames to remove the set
+ // And then we'll call GpuVideoDevice::ReleaseVideoFrame() to remove the set
// of GL textures associated with the context.
//
// And finally we'll send IPC commands to IpcVideoDecoder to destroy all
// GL textures generated.
+ bool ret = gles2_decoder_->MakeCurrent();
+ DCHECK(ret) << "Failed to switch context";
+
+ for (VideoFrameMap::iterator i = video_frame_map_.begin();
+ i != video_frame_map_.end(); ++i) {
+ video_device_->ReleaseVideoFrame(i->second);
+ }
+ video_frame_map_.clear();
+ SendReleaseAllVideoFrames();
}
-void GpuVideoDecoder::Destroy(DestructionCompleteCallback* callback) {
+void GpuVideoDecoder::UploadToVideoFrame(void* buffer,
+ scoped_refptr<media::VideoFrame> frame,
+ Task* task) {
+ // This method is called by VideoDecodeEngine to upload a buffer to a
+ // VideoFrame. We should just delegate this to GpuVideoDevice which contains
+ // the actual implementation.
+ bool ret = gles2_decoder_->MakeCurrent();
+ DCHECK(ret) << "Failed to switch context";
+
+ // Actually do the upload on the main thread.
+ ret = video_device_->UploadToVideoFrame(buffer, frame);
+ DCHECK(ret) << "Failed to upload video content to a VideoFrame.";
+ task->Run();
+ delete task;
+}
+
+void GpuVideoDecoder::Destroy(Task* task) {
// TODO(hclam): I still need to think what I should do here.
}
@@ -231,7 +198,6 @@ GpuVideoDecoder::GpuVideoDecoder(
base::ProcessHandle handle,
gpu::gles2::GLES2Decoder* decoder)
: decoder_host_route_id_(param->decoder_host_route_id),
- output_transfer_buffer_busy_(false),
pending_output_requests_(0),
channel_(channel),
renderer_handle_(handle),
@@ -242,17 +208,17 @@ GpuVideoDecoder::GpuVideoDecoder(
// TODO(jiesun): find a better way to determine which VideoDecodeEngine
// to return on current platform.
decode_engine_.reset(new FakeGlVideoDecodeEngine());
+ video_device_.reset(new FakeGlVideoDevice());
}
void GpuVideoDecoder::OnInitialize(const GpuVideoDecoderInitParam& param) {
// TODO(hclam): Initialize the VideoDecodeContext first.
-
// TODO(jiesun): codec id should come from |param|.
config_.codec = media::kCodecH264;
config_.width = param.width;
config_.height = param.height;
config_.opaque_context = NULL;
- decode_engine_->Initialize(NULL, this, config_);
+ decode_engine_->Initialize(NULL, this, this, config_);
}
void GpuVideoDecoder::OnUninitialize() {
@@ -260,8 +226,6 @@ void GpuVideoDecoder::OnUninitialize() {
}
void GpuVideoDecoder::OnFlush() {
- // TODO(jiesun): this is wrong??
- output_transfer_buffer_busy_ = false;
pending_output_requests_ = 0;
decode_engine_->Flush();
@@ -285,49 +249,60 @@ void GpuVideoDecoder::OnEmptyThisBuffer(
void GpuVideoDecoder::OnFillThisBuffer(
const GpuVideoDecoderOutputBufferParam& param) {
// Switch context before calling to the decode engine.
- // TODO(hclam): This is temporary to allow FakeGlVideoDecodeEngine to issue
- // GL commands correctly.
bool ret = gles2_decoder_->MakeCurrent();
DCHECK(ret) << "Failed to switch context";
if (info_.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) {
pending_output_requests_++;
- if (!output_transfer_buffer_busy_) {
- output_transfer_buffer_busy_ = true;
- decode_engine_->ProduceVideoFrame(frame_);
- }
} else {
- // TODO(hclam): I need to rethink how to delegate calls to
- // VideoDecodeEngine, I may need to create a GpuVideoDecodeContext that
- // provides a method for me to make calls to VideoDecodeEngine with the
- // correct VideoFrame.
- DCHECK_EQ(VideoFrame::TYPE_GL_TEXTURE, info_.stream_info.surface_type);
-
- scoped_refptr<media::VideoFrame> frame;
- VideoFrame::GlTexture textures[3] = { param.texture, 0, 0 };
-
- media::VideoFrame:: CreateFrameGlTexture(
- media::VideoFrame::RGBA, config_.width, config_.height, textures,
- base::TimeDelta(), base::TimeDelta(), &frame);
- decode_engine_->ProduceVideoFrame(frame);
}
}
void GpuVideoDecoder::OnFillThisBufferDoneACK() {
if (info_.stream_info.surface_type == VideoFrame::TYPE_SYSTEM_MEMORY) {
- output_transfer_buffer_busy_ = false;
pending_output_requests_--;
if (pending_output_requests_) {
- output_transfer_buffer_busy_ = true;
decode_engine_->ProduceVideoFrame(frame_);
}
}
}
+void GpuVideoDecoder::OnVideoFrameAllocated(int32 frame_id,
+ std::vector<uint32> textures) {
+ // This method is called in response to a video frame allocation request sent
+ // to the Renderer process.
+ // We should use the textures to generate a VideoFrame by using
+ // GpuVideoDevice. The VideoFrame created is added to the internal map.
+ // If we have generated enough VideoFrames, we run |pending_allocation_->task|
+ // to complete the allocation process.
+ for (size_t i = 0; i < textures.size(); ++i) {
+ media::VideoFrame::GlTexture gl_texture;
+ // Translate the client texture id to service texture id.
+ bool ret = gles2_decoder_->GetServiceTextureId(textures[i], &gl_texture);
+ DCHECK(ret) << "Cannot translate client texture ID to service ID";
+ textures[i] = gl_texture;
+ }
+
+ scoped_refptr<media::VideoFrame> frame;
+ bool ret = video_device_->CreateVideoFrameFromGlTextures(
+ pending_allocation_->width, pending_allocation_->height,
+ pending_allocation_->format, textures, &frame);
+
+ DCHECK(ret) << "Failed to allocation VideoFrame from GL textures)";
+ pending_allocation_->frames->push_back(frame);
+ video_frame_map_.insert(std::make_pair(frame_id, frame));
+
+ if (video_frame_map_.size() == pending_allocation_->n) {
+ pending_allocation_->task->Run();
+ delete pending_allocation_->task;
+ pending_allocation_.reset();
+ }
+}
+
void GpuVideoDecoder::SendInitializeDone(
const GpuVideoDecoderInitDoneParam& param) {
if (!channel_->Send(
- new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) {
+ new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) {
LOG(ERROR) << "GpuVideoDecoderMsg_InitializeACK failed";
}
}
@@ -346,14 +321,14 @@ void GpuVideoDecoder::SendFlushDone() {
void GpuVideoDecoder::SendEmptyBufferDone() {
if (!channel_->Send(
- new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) {
+ new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) {
LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferDone failed";
}
}
void GpuVideoDecoder::SendEmptyBufferACK() {
if (!channel_->Send(
- new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) {
+ new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) {
LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferACK failed";
}
}
@@ -361,7 +336,23 @@ void GpuVideoDecoder::SendEmptyBufferACK() {
void GpuVideoDecoder::SendFillBufferDone(
const GpuVideoDecoderOutputBufferParam& param) {
if (!channel_->Send(
- new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), param))) {
+ new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), param))) {
LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBufferDone failed";
}
}
+
+void GpuVideoDecoder::SendAllocateVideoFrames(
+ int n, size_t width, size_t height, media::VideoFrame::Format format) {
+ if (!channel_->Send(
+ new GpuVideoDecoderHostMsg_AllocateVideoFrames(
+ route_id(), n, width, height, format))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_AllocateVideoFrames failed";
+ }
+}
+
+void GpuVideoDecoder::SendReleaseAllVideoFrames() {
+ if (!channel_->Send(
+ new GpuVideoDecoderHostMsg_ReleaseAllVideoFrames(route_id()))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_ReleaseAllVideoFrames failed";
+ }
+}
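
AllocateVideoFrames() above cannot complete synchronously: the GL textures live in the Renderer process, so the request is parked in a PendingAllocation, an IPC message is sent, and OnVideoFrameAllocated() finishes the job one reply at a time. A simplified standalone sketch of that handshake follows, with std::function standing in for Task and a plain int for the texture handle; the names are illustrative, not the Chromium classes.

#include <cassert>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>

class FrameAllocator {
 public:
  // Called once per allocation round; replies arrive later via
  // OnFrameAllocated(), one frame per IPC message.
  void AllocateFrames(int n, std::function<void()> done) {
    assert(!pending_ && frames_.empty());
    pending_.reset(new Pending{n, std::move(done)});
    // Real code would now send GpuVideoDecoderHostMsg_AllocateVideoFrames.
  }

  // Completion fires only when the requested count has been reached.
  void OnFrameAllocated(int32_t frame_id, int texture) {
    frames_[frame_id] = texture;
    if (pending_ && frames_.size() == static_cast<size_t>(pending_->n)) {
      pending_->done();
      pending_.reset();
    }
  }

 private:
  struct Pending {
    int n;                       // number of frames requested
    std::function<void()> done;  // stands in for Task*
  };
  std::unique_ptr<Pending> pending_;
  std::map<int32_t, int> frames_;  // frame_id -> texture handle
};

int main() {
  FrameAllocator allocator;
  bool completed = false;
  allocator.AllocateFrames(2, [&] { completed = true; });
  allocator.OnFrameAllocated(0, 101);
  assert(!completed);             // still waiting for the second frame
  allocator.OnFrameAllocated(1, 102);
  assert(completed);              // all frames arrived; the task ran
}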
diff --git a/chrome/gpu/gpu_video_decoder.h b/chrome/gpu/gpu_video_decoder.h
index 7aabffa..d4c9b09 100644
--- a/chrome/gpu/gpu_video_decoder.h
+++ b/chrome/gpu/gpu_video_decoder.h
@@ -5,6 +5,9 @@
#ifndef CHROME_GPU_GPU_VIDEO_DECODER_H_
#define CHROME_GPU_GPU_VIDEO_DECODER_H_
+#include <map>
+#include <vector>
+
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/ref_counted.h"
@@ -36,7 +39,7 @@ class GpuChannel;
// In addition to delegating video-related commands to VideoDecodeEngine it
// has the following important functions:
//
-// Buffer Allocation
+// BUFFER ALLOCATION
//
// VideoDecodeEngine requires platform specific video frame buffer to operate.
// In order to abstract the platform specific bits GpuVideoDecoderContext is
@@ -63,20 +66,28 @@ class GpuChannel;
// VideoFrame(s) from the textures.
// 6. GpuVideoDecoder sends the VideoFrame(s) generated to VideoDecodeEngine.
//
-// Buffer Translation
+// BUFFER UPLOADING
+//
+// A VideoDecodeEngine always produces device-specific buffers. In order to
+// use them in Chrome we always upload them to GL textures. The upload step is
+// different on each platform and each subsystem, so we perform these special
+// upload steps through GpuVideoDevice implementations written for each
+// VideoDecodeEngine.
+//
+// BUFFER MAPPING
//
// GpuVideoDecoder will be working with VideoDecodeEngine, they exchange
-// buffers that are only meaningful to VideoDecodeEngine. In order to translate
-// that to something we can transport in the IPC channel we need a mapping
-// between VideoFrame and buffer ID known between GpuVideoDecoder and
+// buffers that are only meaningful to VideoDecodeEngine. In order to map that
+// to something we can transport in the IPC channel we need a mapping between
+// VideoFrame and buffer ID known between GpuVideoDecoder and
// GpuVideoDecoderHost in the Renderer process.
//
// After texture allocation and VideoFrame allocation are done, GpuVideoDecoder
// will maintain such mapping.
//
class GpuVideoDecoder
- : public IPC::Channel::Listener,
- public base::RefCountedThreadSafe<GpuVideoDecoder>,
+ : public base::RefCountedThreadSafe<GpuVideoDecoder>,
+ public IPC::Channel::Listener,
public media::VideoDecodeEngine::EventHandler,
public media::VideoDecodeContext {
@@ -98,10 +109,14 @@ class GpuVideoDecoder
// VideoDecodeContext implementation.
virtual void* GetDevice();
- virtual void AllocateVideoFrames(int n, size_t width, size_t height,
- AllocationCompleteCallback* callback);
- virtual void ReleaseVideoFrames(int n, VideoFrame* frames);
- virtual void Destroy(DestructionCompleteCallback* callback);
+ virtual void AllocateVideoFrames(
+ int n, size_t width, size_t height, media::VideoFrame::Format format,
+ std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task);
+ virtual void ReleaseAllVideoFrames();
+ virtual void UploadToVideoFrame(void* buffer,
+ scoped_refptr<media::VideoFrame> frame,
+ Task* task);
+ virtual void Destroy(Task* task);
// Constructor and destructor.
GpuVideoDecoder(const GpuVideoDecoderInfoParam* param,
@@ -111,19 +126,45 @@ class GpuVideoDecoder
virtual ~GpuVideoDecoder() {}
private:
+ struct PendingAllocation {
+ size_t n;
+ size_t width;
+ size_t height;
+ media::VideoFrame::Format format;
+ std::vector<scoped_refptr<media::VideoFrame> >* frames;
+ Task* task;
+ };
+
int32 route_id() { return decoder_host_route_id_; }
bool CreateInputTransferBuffer(uint32 size,
base::SharedMemoryHandle* handle);
- bool CreateOutputTransferBuffer(uint32 size,
- base::SharedMemoryHandle* handle);
- void CreateVideoFrameOnTransferBuffer();
+
+ // These methods are message handlers for the messages sent from the Renderer
+ // process.
+ void OnInitialize(const GpuVideoDecoderInitParam& param);
+ void OnUninitialize();
+ void OnFlush();
+ void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer);
+ void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& param);
+ void OnFillThisBufferDoneACK();
+ void OnVideoFrameAllocated(int32 frame_id, std::vector<uint32> textures);
+
+ // Helper methods for sending messages to the Renderer process.
+ void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param);
+ void SendUninitializeDone();
+ void SendFlushDone();
+ void SendEmptyBufferDone();
+ void SendEmptyBufferACK();
+ void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& param);
+ void SendAllocateVideoFrames(
+ int n, size_t width, size_t height, media::VideoFrame::Format format);
+ void SendReleaseAllVideoFrames();
int32 decoder_host_route_id_;
// Used only in system memory path. i.e. Remove this later.
scoped_refptr<VideoFrame> frame_;
- bool output_transfer_buffer_busy_;
int32 pending_output_requests_;
GpuChannel* channel_;
@@ -133,29 +174,26 @@ class GpuVideoDecoder
// is used to switch context and translate client texture ID to service ID.
gpu::gles2::GLES2Decoder* gles2_decoder_;
+ // Memory for transferring the input data for the hardware video decoder.
scoped_ptr<base::SharedMemory> input_transfer_buffer_;
- scoped_ptr<base::SharedMemory> output_transfer_buffer_;
+ // VideoDecodeEngine is used to do the actual video decoding.
scoped_ptr<media::VideoDecodeEngine> decode_engine_;
- scoped_ptr<GpuVideoDevice> decode_context_;
- media::VideoCodecConfig config_;
- media::VideoCodecInfo info_;
- // Input message handler.
- void OnInitialize(const GpuVideoDecoderInitParam& param);
- void OnUninitialize();
- void OnFlush();
- void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer);
- void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& param);
- void OnFillThisBufferDoneACK();
+ // GpuVideoDevice is used to generate VideoFrame(s) from GL textures. The
+ // frames generated are understood by the decode engine.
+ scoped_ptr<GpuVideoDevice> video_device_;
- // Output message helper.
- void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param);
- void SendUninitializeDone();
- void SendFlushDone();
- void SendEmptyBufferDone();
- void SendEmptyBufferACK();
- void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& param);
+ // Contains information for allocating VideoFrame(s).
+ scoped_ptr<PendingAllocation> pending_allocation_;
+
+ // Contains the mapping between a |frame_id| and VideoFrame generated by
+ // GpuVideoDevice from the associated GL textures.
+ typedef std::map<int32, scoped_refptr<media::VideoFrame> > VideoFrameMap;
+ VideoFrameMap video_frame_map_;
+
+ media::VideoCodecConfig config_;
+ media::VideoCodecInfo info_;
DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoder);
};
diff --git a/chrome/gpu/media/fake_gl_video_decode_engine.cc b/chrome/gpu/media/fake_gl_video_decode_engine.cc
index 52e2dfd..b3d093f 100644
--- a/chrome/gpu/media/fake_gl_video_decode_engine.cc
+++ b/chrome/gpu/media/fake_gl_video_decode_engine.cc
@@ -4,7 +4,8 @@
#include "chrome/gpu/media/fake_gl_video_decode_engine.h"
-#include "app/gfx/gl/gl_bindings.h"
+#include "media/base/video_frame.h"
+#include "media/video/video_decode_context.h"
FakeGlVideoDecodeEngine::FakeGlVideoDecodeEngine()
: width_(0),
@@ -18,11 +19,33 @@ FakeGlVideoDecodeEngine::~FakeGlVideoDecodeEngine() {
void FakeGlVideoDecodeEngine::Initialize(
MessageLoop* message_loop,
media::VideoDecodeEngine::EventHandler* event_handler,
+ media::VideoDecodeContext* context,
const media::VideoCodecConfig& config) {
handler_ = event_handler;
+ context_ = context;
width_ = config.width;
height_ = config.height;
+ // Create an internal VideoFrame that we can write to. This is going to be
+ // uploaded through VideoDecodeContext.
+ media::VideoFrame::CreateFrame(
+ media::VideoFrame::RGBA, width_, height_, base::TimeDelta(),
+ base::TimeDelta(), &internal_frame_);
+ memset(internal_frame_->data(media::VideoFrame::kRGBPlane), 0,
+ height_ * internal_frame_->stride(media::VideoFrame::kRGBPlane));
+
+ // Use VideoDecodeContext to allocate VideoFrames that can be consumed
+ // externally.
+ context_->AllocateVideoFrames(
+ 1, width_, height_, media::VideoFrame::RGBA, &external_frames_,
+ NewRunnableMethod(this,
+ &FakeGlVideoDecodeEngine::AllocationCompleteTask));
+}
+
+void FakeGlVideoDecodeEngine::AllocationCompleteTask() {
+ DCHECK_EQ(1u, external_frames_.size());
+ DCHECK_EQ(media::VideoFrame::TYPE_GL_TEXTURE, external_frames_[0]->type());
+
media::VideoCodecInfo info;
info.success = true;
info.provides_buffers = true;
@@ -30,9 +53,6 @@ void FakeGlVideoDecodeEngine::Initialize(
info.stream_info.surface_type = media::VideoFrame::TYPE_GL_TEXTURE;
info.stream_info.surface_width = width_;
info.stream_info.surface_height = height_;
-
- // TODO(hclam): When we have VideoDecodeContext we should use it to allocate
- // video frames.
handler_->OnInitializeComplete(info);
}
@@ -62,7 +82,7 @@ void FakeGlVideoDecodeEngine::ProduceVideoFrame(
scoped_array<uint8> buffer(new uint8[size]);
memset(buffer.get(), 0, size);
- uint8* row = buffer.get();
+ uint8* row = internal_frame_->data(media::VideoFrame::kRGBPlane);
static int seed = 0;
for (int y = 0; y < height_; ++y) {
@@ -75,14 +95,18 @@ void FakeGlVideoDecodeEngine::ProduceVideoFrame(
}
++seed;
- // Assume we are in the right context and then upload the content to the
- // texture.
- glBindTexture(GL_TEXTURE_2D,
- frame->gl_texture(media::VideoFrame::kRGBPlane));
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width_, height_, 0, GL_RGBA,
- GL_UNSIGNED_BYTE, buffer.get());
+ // After we have filled in the content, upload the internal frame to the
+ // VideoFrame allocated through VideoDecodeContext.
+ context_->UploadToVideoFrame(
+ internal_frame_, external_frames_[0],
+ NewRunnableMethod(this, &FakeGlVideoDecodeEngine::UploadCompleteTask,
+ external_frames_[0]));
+}
- // We have done generating data to the frame so give it to the handler.
- // TODO(hclam): Advance the timestamp every time we call this method.
+void FakeGlVideoDecodeEngine::UploadCompleteTask(
+ scoped_refptr<media::VideoFrame> frame) {
+ // |frame| is the upload target. We can immediately send this frame out.
handler_->ConsumeVideoFrame(frame);
}
+
+DISABLE_RUNNABLE_METHOD_REFCOUNT(FakeGlVideoDecodeEngine);
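
The fake engine now decodes into a system-memory frame and only hands a frame to the event handler after VideoDecodeContext reports the upload finished. A reduced sketch of that produce/upload/consume chain is below, with std::function standing in for Task and a string standing in for pixel data; the real upload is asynchronous and GL-backed, while this toy version completes inline.

#include <cassert>
#include <functional>
#include <string>

// Stand-in for VideoDecodeContext: "uploads" by copying the payload, then
// runs the completion task, as GpuVideoDecoder::UploadToVideoFrame does.
struct FakeContext {
  void UploadToVideoFrame(const std::string& src, std::string* dst,
                          std::function<void()> done) {
    *dst = src;  // the platform-specific upload step
    done();      // completion task
  }
};

struct FakeEngine {
  FakeContext* context;
  std::string internal_frame;  // written by the "decoder"
  std::string external_frame;  // GL-texture-backed in the real code
  std::function<void(const std::string&)> consume;  // ConsumeVideoFrame

  void ProduceVideoFrame() {
    internal_frame = "decoded pixels";  // fill the internal frame
    context->UploadToVideoFrame(
        internal_frame, &external_frame,
        [this] { consume(external_frame); });  // UploadCompleteTask
  }
};

int main() {
  FakeContext ctx;
  FakeEngine engine{&ctx};
  std::string delivered;
  engine.consume = [&](const std::string& f) { delivered = f; };
  engine.ProduceVideoFrame();
  assert(delivered == "decoded pixels");  // frame sent out after upload
}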
diff --git a/chrome/gpu/media/fake_gl_video_decode_engine.h b/chrome/gpu/media/fake_gl_video_decode_engine.h
index c3eeb3e..164c8c4 100644
--- a/chrome/gpu/media/fake_gl_video_decode_engine.h
+++ b/chrome/gpu/media/fake_gl_video_decode_engine.h
@@ -5,10 +5,13 @@
#ifndef CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DECODE_ENGINE_H_
#define CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DECODE_ENGINE_H_
+#include <vector>
+
#include "base/scoped_ptr.h"
#include "media/video/video_decode_engine.h"
namespace media {
+class VideoDecodeContext;
class VideoFrame;
} // namespace media
@@ -20,6 +23,7 @@ class FakeGlVideoDecodeEngine : public media::VideoDecodeEngine {
virtual void Initialize(
MessageLoop* message_loop,
media::VideoDecodeEngine::EventHandler* event_handler,
+ media::VideoDecodeContext* context,
const media::VideoCodecConfig& config);
virtual void Uninitialize();
@@ -29,9 +33,25 @@ class FakeGlVideoDecodeEngine : public media::VideoDecodeEngine {
virtual void ProduceVideoFrame(scoped_refptr<media::VideoFrame> frame);
private:
+ // This method is called when video frame allocation is completed by
+ // VideoDecodeContext.
+ void AllocationCompleteTask();
+
+ // This method is called by VideoDecodeContext when uploading to a VideoFrame
+ // has completed.
+ void UploadCompleteTask(scoped_refptr<media::VideoFrame> frame);
+
int width_;
int height_;
media::VideoDecodeEngine::EventHandler* handler_;
+ media::VideoDecodeContext* context_;
+
+ // Internal video frame that is to be uploaded through VideoDecodeContext.
+ scoped_refptr<media::VideoFrame> internal_frame_;
+
+ // VideoFrame(s) allocated through VideoDecodeContext. These frames are
+ // opaque to us, so we need an extra upload step.
+ std::vector<scoped_refptr<media::VideoFrame> > external_frames_;
DISALLOW_COPY_AND_ASSIGN(FakeGlVideoDecodeEngine);
};
diff --git a/chrome/gpu/media/fake_gl_video_device.cc b/chrome/gpu/media/fake_gl_video_device.cc
new file mode 100644
index 0000000..df1b5b1
--- /dev/null
+++ b/chrome/gpu/media/fake_gl_video_device.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/gpu/media/fake_gl_video_device.h"
+
+#include "app/gfx/gl/gl_bindings.h"
+#include "media/base/video_frame.h"
+
+void* FakeGlVideoDevice::GetDevice() {
+ // No actual hardware device should be used.
+ return NULL;
+}
+
+bool FakeGlVideoDevice::CreateVideoFrameFromGlTextures(
+ size_t width, size_t height, media::VideoFrame::Format format,
+ const std::vector<media::VideoFrame::GlTexture>& textures,
+ scoped_refptr<media::VideoFrame>* frame) {
+ media::VideoFrame::GlTexture texture_array[media::VideoFrame::kMaxPlanes];
+ memset(texture_array, 0, sizeof(texture_array));
+
+ for (size_t i = 0; i < textures.size(); ++i) {
+ texture_array[i] = textures[i];
+ }
+
+ media::VideoFrame::CreateFrameGlTexture(format,
+ width,
+ height,
+ texture_array,
+ base::TimeDelta(),
+ base::TimeDelta(),
+ frame);
+ return *frame != NULL;
+}
+
+void FakeGlVideoDevice::ReleaseVideoFrame(
+ const scoped_refptr<media::VideoFrame>& frame) {
+ // We don't need to do anything here because we didn't allocate any
+ // resources for the VideoFrame(s) generated.
+}
+
+bool FakeGlVideoDevice::UploadToVideoFrame(
+ void* buffer, scoped_refptr<media::VideoFrame> frame) {
+ // Assume we are in the right context and then upload the content to the
+ // texture.
+ glBindTexture(GL_TEXTURE_2D,
+ frame->gl_texture(media::VideoFrame::kRGBPlane));
+
+ // |buffer| is also a VideoFrame.
+ scoped_refptr<media::VideoFrame> frame_to_upload(
+ reinterpret_cast<media::VideoFrame*>(buffer));
+ DCHECK_EQ(frame->width(), frame_to_upload->width());
+ DCHECK_EQ(frame->height(), frame_to_upload->height());
+ DCHECK_EQ(frame->format(), frame_to_upload->format());
+ glTexImage2D(
+ GL_TEXTURE_2D, 0, GL_RGBA, frame_to_upload->width(),
+ frame_to_upload->height(), 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, frame_to_upload->data(media::VideoFrame::kRGBPlane));
+ return true;
+}
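
UploadToVideoFrame() above is the only GL work the fake device does: bind the frame's texture and replace its contents with glTexImage2D. Here is a minimal fragment of that upload pattern, assuming a current GL context and a tightly packed RGBA buffer; it is a sketch, not a runnable program on its own.

#include <GL/gl.h>
#include <cstdint>

// Upload |rgba| (width * height * 4 bytes, tightly packed) into |texture|.
// Assumes the correct GL context is already current, as GpuVideoDecoder
// guarantees by calling GLES2Decoder::MakeCurrent() first.
void UploadRgbaToTexture(GLuint texture, int width, int height,
                         const uint8_t* rgba) {
  glBindTexture(GL_TEXTURE_2D, texture);
  // Re-specifying the full image each frame is simple but not fast;
  // glTexSubImage2D would avoid reallocating the texture storage.
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
               GL_RGBA, GL_UNSIGNED_BYTE, rgba);
}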
diff --git a/chrome/gpu/media/fake_gl_video_device.h b/chrome/gpu/media/fake_gl_video_device.h
new file mode 100644
index 0000000..711c3ef
--- /dev/null
+++ b/chrome/gpu/media/fake_gl_video_device.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DEVICE_H_
+#define CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DEVICE_H_
+
+#include "chrome/gpu/media/gpu_video_device.h"
+
+// A simple GpuVideoDevice that creates VideoFrame(s) backed by GL textures.
+// It uploads RGBA frames in system memory to the GL texture.
+class FakeGlVideoDevice : public GpuVideoDevice {
+ public:
+ virtual ~FakeGlVideoDevice() {}
+
+ virtual void* GetDevice();
+ virtual bool CreateVideoFrameFromGlTextures(
+ size_t width, size_t height, media::VideoFrame::Format format,
+ const std::vector<media::VideoFrame::GlTexture>& textures,
+ scoped_refptr<media::VideoFrame>* frame);
+ virtual void ReleaseVideoFrame(
+ const scoped_refptr<media::VideoFrame>& frame);
+ virtual bool UploadToVideoFrame(void* buffer,
+ scoped_refptr<media::VideoFrame> frame);
+};
+
+#endif // CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DEVICE_H_
diff --git a/chrome/gpu/media/gpu_video_device.h b/chrome/gpu/media/gpu_video_device.h
index 7998070..0556903 100644
--- a/chrome/gpu/media/gpu_video_device.h
+++ b/chrome/gpu/media/gpu_video_device.h
@@ -5,6 +5,8 @@
#ifndef CHROME_GPU_MEDIA_GPU_VIDEO_DEVICE_H_
#define CHROME_GPU_MEDIA_GPU_VIDEO_DEVICE_H_
+#include <vector>
+
#include "media/base/video_frame.h"
#include "media/video/video_decode_context.h"
@@ -31,16 +33,25 @@ class GpuVideoDevice {
//
// The VideoFrame generated is used by VideoDecodeEngine as an output buffer.
//
- // |frames| will contain the set of VideoFrame(s) generated.
+ // |frame| will contain the VideoFrame generated.
//
// Return true if the operation was successful.
virtual bool CreateVideoFrameFromGlTextures(
size_t width, size_t height, media::VideoFrame::Format format,
- media::VideoFrame::GlTexture const* textures,
+ const std::vector<media::VideoFrame::GlTexture>& textures,
scoped_refptr<media::VideoFrame>* frame) = 0;
// Release VideoFrame generated.
- virtual void ReleaseVideoFrame(scoped_refptr<media::VideoFrame> frame) = 0;
+ virtual void ReleaseVideoFrame(
+ const scoped_refptr<media::VideoFrame>& frame) = 0;
+
+ // Upload a device specific buffer to a VideoFrame object that can be used in
+ // the GPU process.
+ //
+ // Return true if successful.
+ // TODO(hclam): Rename this to ConvertToVideoFrame().
+ virtual bool UploadToVideoFrame(void* buffer,
+ scoped_refptr<media::VideoFrame> frame) = 0;
};
#endif // CHROME_GPU_MEDIA_GPU_VIDEO_DEVICE_H_
diff --git a/chrome/renderer/gpu_video_decoder_host.cc b/chrome/renderer/gpu_video_decoder_host.cc
index a585ede..771c38a 100644
--- a/chrome/renderer/gpu_video_decoder_host.cc
+++ b/chrome/renderer/gpu_video_decoder_host.cc
@@ -109,12 +109,9 @@ void GpuVideoDecoderHost::FillThisBuffer(scoped_refptr<VideoFrame> frame) {
// TODO(hclam): We should keep an IDMap to convert between a frame and a buffer
// ID so that we can signal GpuVideoDecoder in GPU process to use the buffer.
// This eliminates one conversion step.
+ // TODO(hclam): Fill the param.
GpuVideoDecoderOutputBufferParam param;
- // TODO(hclam): This is a hack to pass the texture id to the hardware video
- // decoder. We should have created a mapping between VideoFrame and buffer id
- // and we pass the buffer id to the GPU process.
- param.texture = frame->gl_texture(VideoFrame::kRGBPlane);
if (!channel_host_ || !channel_host_->Send(
new GpuVideoDecoderMsg_FillThisBuffer(route_id(), param))) {
LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBuffer failed";
@@ -152,13 +149,6 @@ void GpuVideoDecoderHost::OnInitializeDone(
if (!input_transfer_buffer_->Map(param.input_buffer_size))
break;
- if (!base::SharedMemory::IsHandleValid(param.output_buffer_handle))
- break;
- output_transfer_buffer_.reset(
- new base::SharedMemory(param.output_buffer_handle, false));
- if (!output_transfer_buffer_->Map(param.output_buffer_size))
- break;
-
success = true;
} while (0);
@@ -168,7 +158,6 @@ void GpuVideoDecoderHost::OnInitializeDone(
void GpuVideoDecoderHost::OnUninitializeDone() {
input_transfer_buffer_.reset();
- output_transfer_buffer_.reset();
event_handler_->OnUninitializeDone();
}
@@ -189,27 +178,11 @@ void GpuVideoDecoderHost::OnFillThisBufferDone(
if (param.flags & GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream) {
VideoFrame::CreateEmptyFrame(&frame);
- } else if (done_param_.surface_type ==
- media::VideoFrame::TYPE_SYSTEM_MEMORY) {
- VideoFrame::CreateFrame(VideoFrame::YV12,
- init_param_.width,
- init_param_.height,
- base::TimeDelta::FromMicroseconds(param.timestamp),
- base::TimeDelta::FromMicroseconds(param.duration),
- &frame);
- uint8* src = static_cast<uint8*>(output_transfer_buffer_->memory());
- uint8* data0 = frame->data(0);
- uint8* data1 = frame->data(1);
- uint8* data2 = frame->data(2);
- int32 size = init_param_.width * init_param_.height;
- memcpy(data0, src, size);
- memcpy(data1, src + size, size / 4);
- memcpy(data2, src + size + size / 4, size / 4);
- } else if (done_param_.surface_type == media::VideoFrame::TYPE_GL_TEXTURE) {
+ } else {
// TODO(hclam): The logic in buffer allocation is pretty much around
- // using shared memory for output buffer which needs to be adjusted. For
- // now we have to add this hack to get the texture id.
- VideoFrame::GlTexture textures[3] = { param.texture, 0, 0 };
+ // using shared memory for output buffer which needs to be adjusted.
+ // Fake the texture ID until we implement it properly.
+ VideoFrame::GlTexture textures[3] = { 0, 0, 0 };
media::VideoFrame::CreateFrameGlTexture(
media::VideoFrame::RGBA, init_param_.width, init_param_.height,
textures,
diff --git a/chrome/renderer/gpu_video_decoder_host.h b/chrome/renderer/gpu_video_decoder_host.h
index 02f8fc8..1255bf5 100644
--- a/chrome/renderer/gpu_video_decoder_host.h
+++ b/chrome/renderer/gpu_video_decoder_host.h
@@ -112,7 +112,6 @@ class GpuVideoDecoderHost
// Transfer buffers for both input and output.
// TODO(jiesun): remove output buffer when hardware composition is ready.
scoped_ptr<base::SharedMemory> input_transfer_buffer_;
- scoped_ptr<base::SharedMemory> output_transfer_buffer_;
DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoderHost);
};
diff --git a/chrome/renderer/media/gles2_video_decode_context.cc b/chrome/renderer/media/gles2_video_decode_context.cc
index 075f80a..b7eec02 100644
--- a/chrome/renderer/media/gles2_video_decode_context.cc
+++ b/chrome/renderer/media/gles2_video_decode_context.cc
@@ -20,15 +20,20 @@ void* Gles2VideoDecodeContext::GetDevice() {
}
void Gles2VideoDecodeContext::AllocateVideoFrames(
- int n, size_t width, size_t height, AllocationCompleteCallback* callback) {
+ int n, size_t width, size_t height, media::VideoFrame::Format format,
+ std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task) {
// TODO(hclam): Implement.
}
-void Gles2VideoDecodeContext::ReleaseVideoFrames(int n,
- media::VideoFrame* frames) {
+void Gles2VideoDecodeContext::ReleaseAllVideoFrames() {
// TODO(hclam): Implement.
}
-void Gles2VideoDecodeContext::Destroy(DestructionCompleteCallback* callback) {
+void Gles2VideoDecodeContext::UploadToVideoFrame(
+ void* buffer, scoped_refptr<media::VideoFrame> frame, Task* task) {
+ // TODO(hclam): Implement.
+}
+
+void Gles2VideoDecodeContext::Destroy(Task* task) {
// TODO(hclam): Implement.
}
diff --git a/chrome/renderer/media/gles2_video_decode_context.h b/chrome/renderer/media/gles2_video_decode_context.h
index e087bb3..4f556ab 100644
--- a/chrome/renderer/media/gles2_video_decode_context.h
+++ b/chrome/renderer/media/gles2_video_decode_context.h
@@ -95,10 +95,14 @@ class Gles2VideoDecodeContext : public media::VideoDecodeContext {
// media::VideoDecodeContext implementation.
virtual void* GetDevice();
- virtual void AllocateVideoFrames(int n, size_t width, size_t height,
- AllocationCompleteCallback* callback);
- virtual void ReleaseVideoFrames(int n, media::VideoFrame* frames);
- virtual void Destroy(DestructionCompleteCallback* callback);
+ virtual void AllocateVideoFrames(
+ int n, size_t width, size_t height, media::VideoFrame::Format format,
+ std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task);
+ virtual void ReleaseAllVideoFrames();
+ virtual void UploadToVideoFrame(void* buffer,
+ scoped_refptr<media::VideoFrame> frame,
+ Task* task);
+ virtual void Destroy(Task* task);
//--------------------------------------------------------------------------
// Any thread
diff --git a/chrome/renderer/media/ipc_video_decoder.cc b/chrome/renderer/media/ipc_video_decoder.cc
index 0b6896b..eda8696 100644
--- a/chrome/renderer/media/ipc_video_decoder.cc
+++ b/chrome/renderer/media/ipc_video_decoder.cc
@@ -111,10 +111,9 @@ void IpcVideoDecoder::OnInitializeDone(
media::mime_type::kUncompressedVideo);
media_format_.SetAsInteger(media::MediaFormat::kWidth, width_);
media_format_.SetAsInteger(media::MediaFormat::kHeight, height_);
- media_format_.SetAsInteger(media::MediaFormat::kSurfaceType,
- static_cast<int>(param.surface_type));
- media_format_.SetAsInteger(media::MediaFormat::kSurfaceFormat,
- static_cast<int>(param.format));
+ media_format_.SetAsInteger(
+ media::MediaFormat::kSurfaceType,
+ static_cast<int>(media::VideoFrame::TYPE_GL_TEXTURE));
state_ = kPlaying;
} else {
LOG(ERROR) << "IpcVideoDecoder initialization failed!";
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.cc b/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 139e1af..9c11bb5 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -537,6 +537,8 @@ class GLES2DecoderImpl : public base::SupportsWeakPtr<GLES2DecoderImpl>,
virtual gfx::GLContext* GetGLContext() { return context_.get(); }
virtual void SetSwapBuffersCallback(Callback0::Type* callback);
+ virtual bool GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id);
// Restores the current state to the user's settings.
void RestoreCurrentFramebufferBindings();
@@ -2051,6 +2053,17 @@ void GLES2DecoderImpl::SetSwapBuffersCallback(Callback0::Type* callback) {
swap_buffers_callback_.reset(callback);
}
+bool GLES2DecoderImpl::GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id) {
+ TextureManager::TextureInfo* texture =
+ texture_manager()->GetTextureInfo(client_texture_id);
+ if (texture) {
+ *service_texture_id = texture->service_id();
+ return true;
+ }
+ return false;
+}
+
void GLES2DecoderImpl::Destroy() {
if (context_.get()) {
MakeCurrent();
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.h b/gpu/command_buffer/service/gles2_cmd_decoder.h
index e94df94..694fa37 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -82,6 +82,13 @@ class GLES2Decoder : public CommonDecoder {
// Sets a callback which is called when a SwapBuffers command is processed.
virtual void SetSwapBuffersCallback(Callback0::Type* callback) = 0;
+ // Get the service texture ID corresponding to a client texture ID.
+ // If no such record is found then return false.
+ virtual bool GetServiceTextureId(uint32 client_texture_id,
+ uint32* service_texture_id) {
+ return false;
+ }
+
protected:
explicit GLES2Decoder(ContextGroup* group);
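
GetServiceTextureId() bridges the two texture namespaces: the Renderer process only knows client texture IDs, while GL calls issued in the GPU process need the service IDs that the decoder's TextureManager assigned. A toy standalone version of the lookup, with a std::map standing in for the decoder's texture records (illustrative only):

#include <cassert>
#include <cstdint>
#include <map>

class IdTranslator {
 public:
  void RegisterTexture(uint32_t client_id, uint32_t service_id) {
    client_to_service_[client_id] = service_id;
  }

  // Mirrors GLES2Decoder::GetServiceTextureId(): returns false when the
  // client ID has no corresponding texture record.
  bool GetServiceTextureId(uint32_t client_id, uint32_t* service_id) const {
    std::map<uint32_t, uint32_t>::const_iterator it =
        client_to_service_.find(client_id);
    if (it == client_to_service_.end())
      return false;
    *service_id = it->second;
    return true;
  }

 private:
  std::map<uint32_t, uint32_t> client_to_service_;
};

int main() {
  IdTranslator translator;
  translator.RegisterTexture(1, 42);  // client texture 1 -> service texture 42
  uint32_t service = 0;
  assert(translator.GetServiceTextureId(1, &service) && service == 42);
  assert(!translator.GetServiceTextureId(2, &service));  // unknown client ID
}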
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 6faa410e..c79f679 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -95,7 +95,7 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
config.opaque_context = av_stream;
config.width = width_;
config.height = height_;
- decode_engine_->Initialize(message_loop(), this, config);
+ decode_engine_->Initialize(message_loop(), this, NULL, config);
}
void FFmpegVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc
index 6f2eb95..6ed176f 100644
--- a/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -18,6 +18,7 @@
#include "media/filters/ffmpeg_interfaces.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/video/video_decode_engine.h"
+#include "media/video/video_decode_context.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
@@ -50,8 +51,9 @@ class MockFFmpegDemuxerStream : public MockDemuxerStream,
// TODO(hclam): Share this in a separate file.
class MockVideoDecodeEngine : public VideoDecodeEngine {
public:
- MOCK_METHOD3(Initialize, void(MessageLoop* message_loop,
+ MOCK_METHOD4(Initialize, void(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config));
MOCK_METHOD1(ConsumeVideoSample, void(scoped_refptr<Buffer> buffer));
MOCK_METHOD1(ProduceVideoFrame, void(scoped_refptr<VideoFrame> buffer));
@@ -182,7 +184,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
EXPECT_CALL(*demuxer_, GetAVStream())
.WillOnce(Return(&stream_));
- EXPECT_CALL(*engine_, Initialize(_, _, _))
+ EXPECT_CALL(*engine_, Initialize(_, _, _, _))
.WillOnce(EngineInitialize(engine_, true));
EXPECT_CALL(callback_, OnFilterCallback());
@@ -264,7 +266,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_EngineFails) {
EXPECT_CALL(*demuxer_, GetAVStream())
.WillOnce(Return(&stream_));
- EXPECT_CALL(*engine_, Initialize(_, _, _))
+ EXPECT_CALL(*engine_, Initialize(_, _, _, _))
.WillOnce(EngineInitialize(engine_, false));
EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE));
diff --git a/media/filters/omx_video_decoder.cc b/media/filters/omx_video_decoder.cc
index 80a5a13..82793de 100644
--- a/media/filters/omx_video_decoder.cc
+++ b/media/filters/omx_video_decoder.cc
@@ -110,7 +110,7 @@ void OmxVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
config.opaque_context = NULL;
config.width = width_;
config.height = height_;
- omx_engine_->Initialize(message_loop(), this, config);
+ omx_engine_->Initialize(message_loop(), this, NULL, config);
}
void OmxVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
diff --git a/media/mf/mft_h264_decoder.cc b/media/mf/mft_h264_decoder.cc
index 6d85f70..e8d6b05 100644
--- a/media/mf/mft_h264_decoder.cc
+++ b/media/mf/mft_h264_decoder.cc
@@ -170,6 +170,7 @@ MftH264Decoder::~MftH264Decoder() {
void MftH264Decoder::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) {
LOG(INFO) << "MftH264Decoder::Initialize";
if (state_ != kUninitialized) {
diff --git a/media/mf/mft_h264_decoder.h b/media/mf/mft_h264_decoder.h
index 61e3c65..57c9e9f 100644
--- a/media/mf/mft_h264_decoder.h
+++ b/media/mf/mft_h264_decoder.h
@@ -36,7 +36,8 @@ class MftH264Decoder : public media::VideoDecodeEngine {
explicit MftH264Decoder(bool use_dxva, HWND draw_window);
~MftH264Decoder();
virtual void Initialize(MessageLoop* message_loop,
- media::VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void Uninitialize();
virtual void Flush();
diff --git a/media/mf/mft_h264_decoder_example.cc b/media/mf/mft_h264_decoder_example.cc
index 788a2ca..0ed9553 100644
--- a/media/mf/mft_h264_decoder_example.cc
+++ b/media/mf/mft_h264_decoder_example.cc
@@ -362,11 +362,7 @@ static int Run(bool use_dxva, bool render, const std::string& input_file) {
return -1;
}
- mft->Initialize(MessageLoop::current(), handler.get(), config);
- if (!handler->info_.success) {
- LOG(ERROR) << "Failed to initialize decoder";
- return -1;
- }
+ mft->Initialize(MessageLoop::current(), handler.get(), NULL, config);
scoped_ptr<WindowObserver> observer;
if (render) {
observer.reset(new WindowObserver(reader.get(), mft.get()));
diff --git a/media/mf/test/mft_h264_decoder_unittest.cc b/media/mf/test/mft_h264_decoder_unittest.cc
index 67dc07c..11959f7 100644
--- a/media/mf/test/mft_h264_decoder_unittest.cc
+++ b/media/mf/test/mft_h264_decoder_unittest.cc
@@ -207,7 +207,7 @@ TEST_F(MftH264DecoderTest, DecoderInitMissingArgs) {
config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(NULL, NULL, config);
+ decoder->Initialize(NULL, NULL, NULL, config);
EXPECT_EQ(MftH264Decoder::kUninitialized, decoder->state());
}
@@ -219,7 +219,7 @@ TEST_F(MftH264DecoderTest, DecoderInitNoDxva) {
config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(1, handler.init_count_);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Uninitialize();
@@ -235,7 +235,7 @@ TEST_F(MftH264DecoderTest, DecoderInitDxva) {
ASSERT_TRUE(hwnd);
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(true, hwnd));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(1, handler.init_count_);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Uninitialize();
@@ -250,7 +250,7 @@ TEST_F(MftH264DecoderTest, DecoderUninit) {
config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Uninitialize();
EXPECT_EQ(1, handler.uninit_count_);
@@ -277,7 +277,7 @@ TEST_F(MftH264DecoderTest, InitWithNegativeDimensions) {
config.height = -456;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info.surface_width);
EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info.surface_height);
@@ -292,7 +292,7 @@ TEST_F(MftH264DecoderTest, InitWithTooHighDimensions) {
config.height = kDecoderMaxHeight + 1;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info.surface_width);
EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info.surface_height);
@@ -307,7 +307,7 @@ TEST_F(MftH264DecoderTest, DrainOnEmptyBuffer) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
scoped_refptr<Buffer> buffer(new DataBuffer(0));
@@ -336,7 +336,7 @@ TEST_F(MftH264DecoderTest, NoOutputOnGarbageInput) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
handler.SetReader(reader);
handler.SetDecoder(decoder.get());
@@ -364,7 +364,7 @@ TEST_F(MftH264DecoderTest, FlushAtStart) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Flush();
@@ -384,7 +384,7 @@ TEST_F(MftH264DecoderTest, NoFlushAtStopped) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
handler.SetReader(reader);
handler.SetDecoder(decoder.get());
@@ -429,7 +429,7 @@ void DecodeValidVideo(const std::string& filename, int num_frames, bool dxva) {
ASSERT_TRUE(hwnd);
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(dxva, hwnd));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
handler.SetReader(reader);
handler.SetDecoder(decoder.get());
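
Every call site in this unittest picks up the same mechanical change: a
VideoDecodeContext* argument inserted between the event handler and the codec
config. A minimal sketch of the updated call shape, assuming a caller that
already owns an event-handler object named |handler| (passing NULL for the
context keeps the pre-refactoring behavior, per the TODO in
video_decode_engine.h further down):

    // Sketch only; mirrors the updated call sites above.
    MessageLoop loop;
    media::VideoCodecConfig config;
    config.width = 1024;
    config.height = 768;
    scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false /* dxva */, NULL));
    decoder->Initialize(&loop, &handler, NULL /* VideoDecodeContext */, config);
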
diff --git a/media/tools/omx_test/omx_test.cc b/media/tools/omx_test/omx_test.cc
index 44b728f..505f060 100644
--- a/media/tools/omx_test/omx_test.cc
+++ b/media/tools/omx_test/omx_test.cc
@@ -189,7 +189,7 @@ class TestApp : public base::RefCountedThreadSafe<TestApp>,
config.width = av_stream_->codec->width;
config.height = av_stream_->codec->height;
engine_.reset(new OmxVideoDecodeEngine());
- engine_->Initialize(&message_loop_, this, config);
+ engine_->Initialize(&message_loop_, this, NULL, config);
// Execute the message loop so that we can run tasks on it. This call
// will return when we call message_loop_.Quit().
diff --git a/media/video/ffmpeg_video_decode_engine.cc b/media/video/ffmpeg_video_decode_engine.cc
index 75be752..7a8181b 100644
--- a/media/video/ffmpeg_video_decode_engine.cc
+++ b/media/video/ffmpeg_video_decode_engine.cc
@@ -35,6 +35,7 @@ FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() {
void FFmpegVideoDecodeEngine::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) {
allocator_.reset(new FFmpegVideoAllocator());
diff --git a/media/video/ffmpeg_video_decode_engine.h b/media/video/ffmpeg_video_decode_engine.h
index bc1e033..a7ce4e4 100644
--- a/media/video/ffmpeg_video_decode_engine.h
+++ b/media/video/ffmpeg_video_decode_engine.h
@@ -28,6 +28,7 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
// Implementation of the VideoDecodeEngine Interface.
virtual void Initialize(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
diff --git a/media/video/ffmpeg_video_decode_engine_unittest.cc b/media/video/ffmpeg_video_decode_engine_unittest.cc
index 7db2f55..9f737ba 100644
--- a/media/video/ffmpeg_video_decode_engine_unittest.cc
+++ b/media/video/ffmpeg_video_decode_engine_unittest.cc
@@ -91,7 +91,7 @@ class FFmpegVideoDecodeEngineTest : public testing::Test,
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_TRUE(info_.success);
}
@@ -143,7 +143,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_FindDecoderFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_FALSE(info_.success);
}
@@ -165,7 +165,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_InitThreadFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_FALSE(info_.success);
}
@@ -188,7 +188,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_FALSE(info_.success);
}
diff --git a/media/video/omx_video_decode_engine.cc b/media/video/omx_video_decode_engine.cc
index e1bcc7f..0df0f1e 100644
--- a/media/video/omx_video_decode_engine.cc
+++ b/media/video/omx_video_decode_engine.cc
@@ -82,6 +82,7 @@ static void ResetParamHeader(const OmxVideoDecodeEngine& dec, T* param) {
void OmxVideoDecodeEngine::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) {
DCHECK_EQ(message_loop, MessageLoop::current());
diff --git a/media/video/omx_video_decode_engine.h b/media/video/omx_video_decode_engine.h
index 8347eed..c5b3882 100644
--- a/media/video/omx_video_decode_engine.h
+++ b/media/video/omx_video_decode_engine.h
@@ -28,6 +28,7 @@ class OmxVideoDecodeEngine : public VideoDecodeEngine {
// Implementation of the VideoDecodeEngine Interface.
virtual void Initialize(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
diff --git a/media/video/video_decode_context.h b/media/video/video_decode_context.h
index 0bea382..f768a0a 100644
--- a/media/video/video_decode_context.h
+++ b/media/video/video_decode_context.h
@@ -5,20 +5,30 @@
#ifndef MEDIA_VIDEO_VIDEO_DECODE_CONTEXT_H_
#define MEDIA_VIDEO_VIDEO_DECODE_CONTEXT_H_
-#include "base/callback.h"
+#include <vector>
+
+#include "base/task.h"
+#include "media/base/video_frame.h"
namespace media {
class VideoFrame;
-// A VideoDecodeContext provides resources like output video frame storage and
-// hardware decoder handle to a VideoDecodeEngine, it hides all the platform and
-// subsystem details from the decode engine.
+// A VideoDecodeContext is used by a VideoDecodeEngine to provide the
+// following services:
+//
+// 1. Provide access to the hardware video decoding device.
+// 2. Allocate VideoFrame objects that are used to carry the decoded video
+// frames.
+// 3. Upload a device specific buffer to a common VideoFrame storage type.
+// In many cases a VideoDecodeEngine provides its own buffers; these buffers
+// are usually device specific, so a conversion step is needed. Instead of
+// handling all these cases in the renderer, a VideoDecodeContext is used
+// to convert the device specific buffer to a common storage format, e.g.
+// GL textures or system memory. This way the device specific code is kept
+// inside the VideoDecodeEngine and VideoDecodeContext pair.
class VideoDecodeContext {
public:
- typedef Callback2<int, VideoFrame*[]>::Type AllocationCompleteCallback;
- typedef Callback0::Type DestructionCompleteCallback;
-
virtual ~VideoDecodeContext() {};
// Obtain a handle to the hardware video decoder device. The type of the
@@ -28,22 +38,45 @@ class VideoDecodeContext {
// If a hardware device is not needed this method should return NULL.
virtual void* GetDevice() = 0;
- // Allocate |n| video frames with dimension |width| and |height|. |callback|
+ // Allocate |n| video frames with dimension |width| and |height|. |task|
// is called when allocation has completed.
- virtual void AllocateVideoFrames(int n, size_t width, size_t height,
- AllocationCompleteCallback* callback) = 0;
+ //
+ // |frames| is the output parameter for the VideoFrame(s) allocated.
+ virtual void AllocateVideoFrames(
+ int n, size_t width, size_t height, VideoFrame::Format format,
+ std::vector<scoped_refptr<VideoFrame> >* frames,
+ Task* task) = 0;
- // Release video frames allocated by the context. After making this call
- // VideoDecodeEngine should not use the VideoFrame allocated because they
- // could be destroyed.
+ // Release all video frames allocated by the context. After making this call
+ // the VideoDecodeEngine should not use the previously allocated VideoFrames,
+ // because they could be destroyed.
- virtual void ReleaseVideoFrames(int n, VideoFrame* frames) = 0;
+ virtual void ReleaseAllVideoFrames() = 0;
+
+ // Upload a device specific buffer to a video frame. The video frame must
+ // have been allocated via AllocateVideoFrames().
+ // This method is used when a VideoDecodeEngine cannot write directly to a
+ // VideoFrame, e.g. the upload has to happen on a different thread or the
+ // subsystem requires special treatment to generate a VideoFrame. The goal
+ // is to keep VideoDecodeEngine a reusable component and to adapt to
+ // different systems by swapping in a different VideoDecodeContext.
+ //
+ // |frame| is a VideoFrame allocated via AllocateVideoFrames().
+ //
+ // |buffer| is of type void*; it points to a buffer of an internal type in
+ // the VideoDecodeEngine that holds the decoded video frame data. The
+ // implementor of this method should know how to handle it.
+ //
+ // |task| is executed only if the operation completed successfully.
+ // TODO(hclam): Rename this to ConvertToVideoFrame().
+ virtual void UploadToVideoFrame(void* buffer, scoped_refptr<VideoFrame> frame,
+ Task* task) = 0;
- // Destroy this context asynchronously. When the operation is done |callback|
+ // Destroy this context asynchronously. When the operation is done |task|
// is called.
//
- // ReleaseVideoFrames() need to be called with all the video frames allocated
- // before making this call.
- virtual void Destroy(DestructionCompleteCallback* callback) = 0;
+ // ReleaseAllVideoFrames() needs to be called to release all the allocated
+ // video frames before making this call.
+ virtual void Destroy(Task* task) = 0;
};
} // namespace media
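
Taken together, the interface above implies a usage pattern roughly like the
following sketch. MyEngine, its members (context_, frames_, device_buffer_)
and the callback methods are hypothetical stand-ins; only the
VideoDecodeContext calls come from the header above, and NewRunnableMethod is
the base/task.h helper the new #include pulls in:

    // Hypothetical engine-side driver of a VideoDecodeContext.
    // std::vector<scoped_refptr<media::VideoFrame> > frames_;
    void MyEngine::AllocateOutput() {
      context_->AllocateVideoFrames(
          3, width_, height_, media::VideoFrame::RGBA,
          &frames_,  // Out parameter, populated by the time |task| runs.
          NewRunnableMethod(this, &MyEngine::OnFramesAllocated));
    }

    void MyEngine::OnFramesAllocated() {
      // Convert a device specific buffer produced by the decoder into one
      // of the allocated frames.
      context_->UploadToVideoFrame(
          device_buffer_, frames_[0],
          NewRunnableMethod(this, &MyEngine::OnUploadDone));
    }

    void MyEngine::Teardown() {
      // All allocated frames must be released before destroying the context.
      context_->ReleaseAllVideoFrames();
      context_->Destroy(NewRunnableMethod(this, &MyEngine::OnContextDestroyed));
    }
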
diff --git a/media/video/video_decode_engine.h b/media/video/video_decode_engine.h
index 8736890..f9381a2 100644
--- a/media/video/video_decode_engine.h
+++ b/media/video/video_decode_engine.h
@@ -12,6 +12,7 @@
namespace media {
class Buffer;
+class VideoDecodeContext;
enum VideoCodec {
kCodecH264,
@@ -116,13 +117,19 @@ class VideoDecodeEngine {
virtual ~VideoDecodeEngine() {}
- // Initialized the engine with specified configuration. |message_loop| could
- // be NULL if every operation is synchronous. Engine should call the
- // EventHandler::OnInitializeDone() no matter finished successfully or not.
- // TODO(jiesun): remove message_loop and create thread inside openmax engine?
- // or create thread in GpuVideoDecoder and pass message loop here?
+ // Initialize the engine with the specified configuration.
+ //
+ // |context| is used to allocate VideoFrames for the engine's decoded
+ // output. It is important that |context| is used only on |message_loop|.
+ //
+ // TODO(hclam): Currently refactoring code to use VideoDecodeContext so
+ // |context| may be NULL in some cases.
+ //
+ // The engine should call EventHandler::OnInitializeComplete() whether
+ // initialization finished successfully or not.
virtual void Initialize(MessageLoop* message_loop,
EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) = 0;
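
For engine implementations the contract above reads roughly as follows; a
sketch, with MyEngine and SetUpDecoder() as hypothetical names (VideoCodecInfo
and OnInitializeComplete() are what the unittests earlier in this patch mock):

    // Hypothetical implementation honoring the Initialize() contract.
    void MyEngine::Initialize(MessageLoop* message_loop,
                              EventHandler* event_handler,
                              VideoDecodeContext* context,
                              const VideoCodecConfig& config) {
      message_loop_ = message_loop;
      event_handler_ = event_handler;
      context_ = context;  // May be NULL while the refactoring is in progress.
      VideoCodecInfo info;
      info.success = SetUpDecoder(config);  // Hypothetical codec setup.
      event_handler_->OnInitializeComplete(info);
    }
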
// Uninitialize the engine. Engine should destroy all resources and call