summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorscherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-04-14 22:53:39 +0000
committerscherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-04-14 22:53:39 +0000
commitef932ed16a50a6fa0adb801c32f3d4860e880bc9 (patch)
tree76ada28fef125550a957fc868d5cb2a4013b8f93
parent5b8149fa24feeeab86de1b86ee1da6abe68165f4 (diff)
downloadchromium_src-ef932ed16a50a6fa0adb801c32f3d4860e880bc9.zip
chromium_src-ef932ed16a50a6fa0adb801c32f3d4860e880bc9.tar.gz
chromium_src-ef932ed16a50a6fa0adb801c32f3d4860e880bc9.tar.bz2
Draft version of the HW video decode tester and a few other changes.
The intention is that this tester can be used to decouple HW decode accelerator integration from running the whole Chrome browser. Features: - Independent GUnit executable, which should be possible to use in autotests. - Mimics the Renderer process from the Gpu video pipeline perspective. * Test bench contains an implementation of FakeClient which essentially mimics the Renderer process from the GpuVideoDecodeAccelerator's point of view. * FakeClient runs on its own thread and will communicate using the IPC messages that are also used within the real use case. * FakeClient will allocate memory using the same SharedMemory mechanism as the real Renderer code. * Currently reads an H.264 Annex B bitstream from a file and parses it into NAL units before feeding it to the decoder. * TODO: Polishing and improving the features and configurability. * TODO: GLES texture allocation for textures. - Allows building various test cases covering error behaviour both on the AcceleratedVideoDecoder interface and erroneous behaviour from the client. - Also allows checking the expected order of calls if we want to enforce certain behaviour across various implementations. Patch by vmr@chromium.org: http://codereview.chromium.org/6720040/ BUG=none TEST=none git-svn-id: svn://svn.chromium.org/chrome/trunk/src@81663 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--chrome/chrome_tests.gypi31
-rw-r--r--content/common/gpu_messages.h38
-rw-r--r--content/gpu/gpu_video_decode_accelerator.cc211
-rw-r--r--content/gpu/gpu_video_decode_accelerator.h83
-rw-r--r--content/gpu/gpu_video_decode_accelerator_tester.cc897
-rw-r--r--media/media.gyp1
-rw-r--r--media/video/picture.cc72
-rw-r--r--media/video/picture.h65
-rw-r--r--media/video/video_decode_accelerator.cc15
-rw-r--r--media/video/video_decode_accelerator.h237
-rw-r--r--ppapi/c/dev/pp_video_dev.h13
-rw-r--r--ppapi/ppapi_tests.gypi2
-rw-r--r--ppapi/tests/test_video_decoder.cc46
-rw-r--r--ppapi/tests/test_video_decoder.h34
-rw-r--r--webkit/plugins/ppapi/ppb_video_decoder_impl.cc8
15 files changed, 1721 insertions, 32 deletions
diff --git a/chrome/chrome_tests.gypi b/chrome/chrome_tests.gypi
index 284b916..034ae4d 100644
--- a/chrome/chrome_tests.gypi
+++ b/chrome/chrome_tests.gypi
@@ -3294,6 +3294,37 @@
],
},
{
+ # Executable that contains tests that specifically test gpu video decoding
+ # features. Excludes for example command buffer implementation.
+ 'target_name': 'gpu_video_tests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'test_support_common',
+ '../base/base.gyp:base',
+ '../base/base.gyp:base_i18n',
+ '../base/base.gyp:test_support_base',
+ '../ipc/ipc.gyp:test_support_ipc',
+ '../skia/skia.gyp:skia',
+ '../testing/gtest.gyp:gtest',
+ '../testing/gmock.gyp:gmock',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ '<(DEPTH)/content/common/gpu_messages.h',
+ '<(DEPTH)/content/gpu/gpu_video_decode_accelerator.cc',
+ '<(DEPTH)/content/gpu/gpu_video_decode_accelerator.h',
+ '<(DEPTH)/content/gpu/gpu_video_decode_accelerator_tester.cc',
+ '<(DEPTH)/media/video/picture.cc',
+ '<(DEPTH)/media/video/picture.h',
+ '<(DEPTH)/media/video/video_decode_accelerator.cc',
+ '<(DEPTH)/media/video/video_decode_accelerator.h',
+ '<(DEPTH)/ui/gfx/size.cc',
+ '<(DEPTH)/ui/gfx/size.h',
+ ],
+ },
+ {
'target_name': 'plugin_tests',
'type': 'executable',
'msvs_guid': 'A1CAA831-C507-4B2E-87F3-AEC63C9907F9',
diff --git a/content/common/gpu_messages.h b/content/common/gpu_messages.h
index 4e4d5c7..9ea9beb 100644
--- a/content/common/gpu_messages.h
+++ b/content/common/gpu_messages.h
@@ -65,13 +65,6 @@ IPC_STRUCT_BEGIN(GpuVideoDecoderFormatChangeParam)
IPC_STRUCT_MEMBER(base::SharedMemoryHandle, input_buffer_handle)
IPC_STRUCT_END()
-IPC_STRUCT_BEGIN(AcceleratedVideoDecoderDecodeParam)
- IPC_STRUCT_MEMBER(base::SharedMemoryHandle, input_buffer_handle)
- IPC_STRUCT_MEMBER(int32, offset)
- IPC_STRUCT_MEMBER(int32, size)
- IPC_STRUCT_MEMBER(int32, flags) // Miscellaneous flag bit mask.
-IPC_STRUCT_END()
-
#if defined(OS_MACOSX)
IPC_STRUCT_BEGIN(GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params)
IPC_STRUCT_MEMBER(int32, renderer_id)
@@ -535,27 +528,31 @@ IPC_SYNC_MESSAGE_CONTROL1_1(AcceleratedVideoDecoderMsg_GetConfigs,
// Message to create the accelerated video decoder.
IPC_SYNC_MESSAGE_CONTROL1_1(AcceleratedVideoDecoderMsg_Create,
std::vector<uint32>, /* Config */
- int32) /* Decoder ID, 0 equals failure */
+ int32) /* Decoder ID, -1 equals failure */
// Send input buffer for decoding.
-IPC_MESSAGE_ROUTED4(AcceleratedVideoDecoderMsg_Decode,
+IPC_MESSAGE_ROUTED3(AcceleratedVideoDecoderMsg_Decode,
base::SharedMemoryHandle, /* input_buffer_handle */
int32, /* offset */
- int32, /* size */
- int32) /* flags */
+ int32) /* size */
// Sent from Renderer process to the GPU process to give the texture IDs for
// generated GL textures.
-IPC_MESSAGE_ROUTED2(AcceleratedVideoDecoderMsg_AssignPictureBuffer,
+IPC_MESSAGE_ROUTED3(AcceleratedVideoDecoderMsg_AssignPictureBuffer,
int32, /* Picture buffer ID */
+ base::SharedMemoryHandle, /* Pointer to sysmem output */
std::vector<uint32>) /* TextureIDs for pictures */
+// Sent from Renderer process to the GPU process to recycle the given picture
+// buffer for further decoding.
IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
int32) /* Picture buffer ID */
// Send flush request to the decoder.
-IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderMsg_Flush,
- int32) /* 0 for normal flush, 1 for abort flush */
+IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderMsg_Flush)
+
+// Send abort request to the decoder.
+IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderMsg_Abort)
// Destroy and release decoder asynchronously.
IPC_SYNC_MESSAGE_CONTROL0_0(AcceleratedVideoDecoderMsg_Destroy)
@@ -563,14 +560,16 @@ IPC_SYNC_MESSAGE_CONTROL0_0(AcceleratedVideoDecoderMsg_Destroy)
//------------------------------------------------------------------------------
// Accelerated Video Decoder Host Messages
// These messages are sent from GPU process to Renderer process.
+// Inform AcceleratedVideoDecoderHost that AcceleratedVideoDecoder has been
+// created.
+
// Accelerated video decoder has consumed input buffer from transfer buffer.
-IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed)
+IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed,
+ base::SharedMemoryHandle) /* Processed buffer handle */
// Allocate video frames for output of the hardware video decoder.
-IPC_MESSAGE_ROUTED4(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers,
+IPC_MESSAGE_ROUTED2(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers,
int32, /* Number of video frames to generate */
- uint32, /* Width of the video frame */
- uint32, /* Height of the video frame */
std::vector<uint32>) /* Vector containing the dictionary
for buffer config */
@@ -586,6 +585,9 @@ IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderHostMsg_PictureReady,
// Confirm decoder has been flushed.
IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderHostMsg_FlushDone)
+// Confirm decoder has been aborted.
+IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderHostMsg_AbortDone)
+
// Decoder has faced end of stream marker in the stream.
IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderHostMsg_EndOfStream)
diff --git a/content/gpu/gpu_video_decode_accelerator.cc b/content/gpu/gpu_video_decode_accelerator.cc
new file mode 100644
index 0000000..cc1a53e
--- /dev/null
+++ b/content/gpu/gpu_video_decode_accelerator.cc
@@ -0,0 +1,211 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/gpu/gpu_video_decode_accelerator.h"
+
+#include <vector>
+
+#include "base/shared_memory.h"
+#include "content/common/gpu_messages.h"
+#include "ipc/ipc_message_macros.h"
+#include "ipc/ipc_message_utils.h"
+#include "media/video/picture.h"
+#include "ui/gfx/size.h"
+
+GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
+ IPC::Message::Sender* sender,
+ int32 host_route_id)
+ : sender_(sender),
+ route_id_(host_route_id),
+ cb_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
+}
+
+GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {}
+
+bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
+ bool handled = true;
+ IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_GetConfigs,
+ OnGetConfigs)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode,
+ OnDecode)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffer,
+ OnAssignPictureBuffer)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
+ OnReusePictureBuffer)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush,
+ OnFlush)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Abort,
+ OnAbort)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ return handled;
+}
+
+void GpuVideoDecodeAccelerator::OnChannelConnected(int32 peer_pid) {
+ // TODO(vmr): Do we have to react on channel connections?
+}
+
+void GpuVideoDecodeAccelerator::OnChannelError() {
+ // TODO(vmr): Do we have to react on channel errors?
+}
+
+void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
+ uint32 requested_num_of_buffers,
+ const std::vector<uint32>& buffer_properties) {
+ Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
+ route_id_,
+ requested_num_of_buffers,
+ buffer_properties));
+}
+
+void GpuVideoDecodeAccelerator::DismissPictureBuffer(
+ media::VideoDecodeAccelerator::PictureBuffer* picture_buffer) {
+ DCHECK(picture_buffer);
+ // TODO(vmr): Unmap system memory here.
+ Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
+ route_id_,
+ picture_buffer->GetId()));
+}
+
+void GpuVideoDecodeAccelerator::PictureReady(
+ media::VideoDecodeAccelerator::Picture* picture) {
+ Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
+ route_id_,
+ picture->picture_buffer()->GetId()));
+}
+
+void GpuVideoDecodeAccelerator::NotifyEndOfStream() {
+ Send(new AcceleratedVideoDecoderHostMsg_EndOfStream(route_id_));
+}
+
+void GpuVideoDecodeAccelerator::NotifyError(
+ media::VideoDecodeAccelerator::Error error) {
+ Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(route_id_,
+ error));
+}
+
+void GpuVideoDecodeAccelerator::OnGetConfigs(std::vector<uint32> config,
+ std::vector<uint32>* configs) {
+ DCHECK(configs);
+ *configs = video_decode_accelerator_->GetConfig(config);
+}
+
+void GpuVideoDecodeAccelerator::OnCreate(std::vector<uint32> config,
+ int32* decoder_id) {
+}
+
+void GpuVideoDecodeAccelerator::OnDecode(base::SharedMemoryHandle handle,
+ int32 offset,
+ int32 size) {
+ if (offset < 0 || size <= 0) {
+ NotifyError(media::VideoDecodeAccelerator::VIDEODECODERERROR_INVALIDINPUT);
+ return;
+ }
+ if (!base::SharedMemory::IsHandleValid(handle)) {
+ NotifyError(media::VideoDecodeAccelerator::VIDEODECODERERROR_MEMFAILURE);
+ return;
+ }
+ base::SharedMemory* shm = new base::SharedMemory(handle, true);
+ if (!shm || !shm->Map(offset + size)) {
+ NotifyError(media::VideoDecodeAccelerator::VIDEODECODERERROR_MEMFAILURE);
+ return;
+ }
+ // Set the freshly mapped memory address to the bitstream buffer.
+ uint8* mem_ptr = static_cast<uint8*>(shm->memory());
+ media::BitstreamBuffer bitstream(mem_ptr + offset, size, NULL);
+ // Store the SHM in our FIFO queue. We need to do this before Decode because
+ // it is legal to call BitstreamBufferProcessed callback from the Decode
+ // context.
+ shm_in_.push_back(shm);
+ video_decode_accelerator_->Decode(
+ &bitstream,
+ cb_factory_.NewCallback(
+ &GpuVideoDecodeAccelerator::OnBitstreamBufferProcessed));
+}
+
+void GpuVideoDecodeAccelerator::OnAssignPictureBuffer(
+ int32 picture_buffer_id,
+ base::SharedMemoryHandle handle,
+ std::vector<uint32> textures) {
+ // TODO(vmr): Get the right size for picture buffers from config.
+ gfx::Size size(320, 240);
+ uint32 bits_per_pixel = 32;
+ media::VideoDecodeAccelerator::PictureBuffer::MemoryType memory_type;
+ std::vector<media::VideoDecodeAccelerator::PictureBuffer::DataPlaneHandle>
+ planes;
+ if (handle == base::SharedMemory::NULLHandle()) {
+ // TODO(vmr): Handle GLES textures here.
+ memory_type = media::VideoDecodeAccelerator::PictureBuffer::
+ PICTUREBUFFER_MEMORYTYPE_GL_TEXTURE;
+ } else {
+ // Input buffer provided is in system memory in one plane.
+ memory_type = media::VideoDecodeAccelerator::PictureBuffer::
+ PICTUREBUFFER_MEMORYTYPE_SYSTEM;
+ if (!base::SharedMemory::IsHandleValid(handle)) {
+ NotifyError(media::VideoDecodeAccelerator::VIDEODECODERERROR_MEMFAILURE);
+ return;
+ }
+ base::SharedMemory* shm = new base::SharedMemory(handle, true);
+ if (!shm || !shm->Map(size.width() * size.height() * bits_per_pixel)) {
+ NotifyError(media::VideoDecodeAccelerator::VIDEODECODERERROR_MEMFAILURE);
+ return;
+ }
+ media::VideoDecodeAccelerator::PictureBuffer::DataPlaneHandle sysmem_plane;
+ sysmem_plane.sysmem = shm;
+ planes.push_back(sysmem_plane);
+ }
+ std::vector<media::VideoDecodeAccelerator::PictureBuffer*> bufs;
+ std::vector<uint32> color_format;
+ media::VideoDecodeAccelerator::PictureBuffer* picture_buffer =
+ new media::PictureBuffer(picture_buffer_id,
+ size,
+ color_format,
+ memory_type,
+ planes);
+ bufs.push_back(picture_buffer);
+ video_decode_accelerator_->AssignPictureBuffer(bufs);
+}
+
+void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
+ int32 picture_buffer_id) {
+ // TODO(vmr): Get the picture buffer with the id.
+ media::VideoDecodeAccelerator::PictureBuffer* picture_buffer = NULL;
+ video_decode_accelerator_->ReusePictureBuffer(picture_buffer);
+}
+
+void GpuVideoDecodeAccelerator::OnFlush() {
+ if (!video_decode_accelerator_->Flush(cb_factory_.NewCallback(
+ &GpuVideoDecodeAccelerator::OnFlushDone))) {
+ NotifyError(
+ media::VideoDecodeAccelerator::VIDEODECODERERROR_UNEXPECTED_FLUSH);
+ }
+}
+
+void GpuVideoDecodeAccelerator::OnAbort() {
+ video_decode_accelerator_->Abort(cb_factory_.NewCallback(
+ &GpuVideoDecodeAccelerator::OnAbortDone));
+}
+
+void GpuVideoDecodeAccelerator::OnBitstreamBufferProcessed() {
+ base::SharedMemory* shm = shm_in_.front();
+ DCHECK(shm); // Shared memory should always be non-NULL.
+ Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
+ 0, shm->handle()));
+ shm_in_.pop_front();
+}
+
+void GpuVideoDecodeAccelerator::OnFlushDone() {
+ Send(new AcceleratedVideoDecoderHostMsg_FlushDone(0));
+}
+
+void GpuVideoDecodeAccelerator::OnAbortDone() {
+ Send(new AcceleratedVideoDecoderHostMsg_AbortDone(0));
+}
+
+bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
+ DCHECK(sender_);
+ return sender_->Send(message);
+}
+
diff --git a/content/gpu/gpu_video_decode_accelerator.h b/content/gpu/gpu_video_decode_accelerator.h
new file mode 100644
index 0000000..d06f6f1
--- /dev/null
+++ b/content/gpu/gpu_video_decode_accelerator.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_GPU_GPU_VIDEO_DECODE_ACCELERATOR_H_
+#define CONTENT_GPU_GPU_VIDEO_DECODE_ACCELERATOR_H_
+
+#include <deque>
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_callback_factory.h"
+#include "base/shared_memory.h"
+#include "ipc/ipc_channel.h"
+#include "ipc/ipc_message.h"
+#include "media/video/video_decode_accelerator.h"
+
+class GpuVideoDecodeAccelerator
+ : public base::RefCountedThreadSafe<GpuVideoDecodeAccelerator>,
+ public IPC::Channel::Listener,
+ public IPC::Message::Sender,
+ public media::VideoDecodeAccelerator::Client {
+ public:
+ GpuVideoDecodeAccelerator(IPC::Message::Sender* sender, int32 host_route_id);
+ virtual ~GpuVideoDecodeAccelerator();
+
+ // IPC::Channel::Listener implementation.
+ virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
+ virtual void OnChannelConnected(int32 peer_pid) OVERRIDE;
+ virtual void OnChannelError() OVERRIDE;
+
+ // media::VideoDecodeAccelerator::Client implementation.
+ virtual void ProvidePictureBuffers(
+ uint32 requested_num_of_buffers,
+ const std::vector<uint32>& buffer_properties) OVERRIDE;
+ virtual void DismissPictureBuffer(
+ media::VideoDecodeAccelerator::PictureBuffer* picture_buffer) OVERRIDE;
+ virtual void PictureReady(
+ media::VideoDecodeAccelerator::Picture* picture) OVERRIDE;
+ virtual void NotifyEndOfStream();
+ virtual void NotifyError(media::VideoDecodeAccelerator::Error error);
+
+ // Function to delegate sending to actual sender.
+ virtual bool Send(IPC::Message* message);
+
+ void set_video_decode_accelerator(
+ media::VideoDecodeAccelerator* accelerator) {
+ video_decode_accelerator_ = accelerator;
+ }
+
+ private:
+ // Handlers for IPC messages.
+ void OnGetConfigs(std::vector<uint32> config, std::vector<uint32>* configs);
+ void OnCreate(std::vector<uint32> config, int32* decoder_id);
+ void OnDecode(base::SharedMemoryHandle handle, int32 offset, int32 size);
+ void OnAssignPictureBuffer(int32 picture_buffer_id,
+ base::SharedMemoryHandle handle,
+ std::vector<uint32> texture_ids);
+ void OnReusePictureBuffer(int32 picture_buffer_id);
+ void OnFlush();
+ void OnAbort();
+
+ // One-time callbacks from the accelerator.
+ void OnBitstreamBufferProcessed();
+ void OnFlushDone();
+ void OnAbortDone();
+
+ // Pointer to the IPC message sender.
+ IPC::Message::Sender* sender_;
+ // Route ID to communicate with the host.
+ int32 route_id_;
+ // Pointer to the underlying VideoDecodeAccelerator.
+ media::VideoDecodeAccelerator* video_decode_accelerator_;
+ // Callback factory to generate one-time callbacks.
+ base::ScopedCallbackFactory<GpuVideoDecodeAccelerator> cb_factory_;
+ // Container to hold shared memory blocks so that we can return the
+ // information about their consumption to renderer.
+ std::deque<base::SharedMemory*> shm_in_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(GpuVideoDecodeAccelerator);
+};
+
+#endif // CONTENT_GPU_GPU_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/content/gpu/gpu_video_decode_accelerator_tester.cc b/content/gpu/gpu_video_decode_accelerator_tester.cc
new file mode 100644
index 0000000..87a679d
--- /dev/null
+++ b/content/gpu/gpu_video_decode_accelerator_tester.cc
@@ -0,0 +1,897 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(vmr): Once done with the implementation, add conclusive documentation
+// what you can do with this tester, how you can configure it and how
+// you can extend it.
+
+#include <fstream>
+#include <ios>
+#include <iostream>
+#include <new>
+#include <vector>
+
+#include "base/at_exit.h"
+#include "base/message_loop.h"
+#include "base/process.h"
+#include "base/memory/ref_counted.h"
+#include "base/shared_memory.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "content/common/common_param_traits.h"
+#include "content/common/gpu_messages.h"
+#include "content/gpu/gpu_video_decode_accelerator.h"
+#include "ipc/ipc_channel.h"
+#include "ipc/ipc_message.h"
+#include "ipc/ipc_message_utils.h"
+#include "media/base/bitstream_buffer.h"
+#include "media/video/picture.h"
+#include "media/video/video_decode_accelerator.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+using testing::AnyNumber;
+using testing::Invoke;
+using testing::InvokeArgument;
+using testing::Matcher;
+using testing::NiceMock;
+using testing::Return;
+using testing::ReturnRef;
+
+// Route ID.
+static const int32 kRouteId = 99;
+
+class MockGpuChannel : public IPC::Message::Sender {
+ public:
+ MockGpuChannel() {}
+ virtual ~MockGpuChannel() {}
+
+ // IPC::Message::Sender implementation.
+ MOCK_METHOD1(Send, bool(IPC::Message* msg));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockGpuChannel);
+};
+
+class MockVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
+ public:
+ MockVideoDecodeAccelerator() {}
+ virtual ~MockVideoDecodeAccelerator() {}
+
+ MOCK_METHOD1(GetConfig,
+ const std::vector<uint32>&
+ (const std::vector<uint32>& prototype_config));
+ MOCK_METHOD1(Initialize, bool(const std::vector<uint32>& config));
+ MOCK_METHOD2(Decode, bool(media::BitstreamBuffer* bitstream_buffer,
+ media::VideoDecodeAcceleratorCallback* callback));
+ MOCK_METHOD1(AssignPictureBuffer,
+ void(std::vector<media::VideoDecodeAccelerator::PictureBuffer*>
+ picture_buffers));
+ MOCK_METHOD1(ReusePictureBuffer,
+ void(media::VideoDecodeAccelerator::PictureBuffer*
+ picture_buffer));
+ MOCK_METHOD1(Flush, bool(media::VideoDecodeAcceleratorCallback* callback));
+ MOCK_METHOD1(Abort, bool(media::VideoDecodeAcceleratorCallback* callback));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockVideoDecodeAccelerator);
+};
+
+// Pull-based video source to read video data from a file.
+// TODO(vmr): Make this less of a memory hog, Now reads whole file into mem in
+// the beginning. Do this when it actually becomes a problem.
+class TestVideoSource {
+ public:
+ TestVideoSource()
+ : file_length_(0),
+ offset_(0) {}
+
+ ~TestVideoSource() {}
+
+ bool Open(const std::string& url) {
+ // TODO(vmr): Use file_util::ReadFileToString or equivalent to read the file
+ // if one-shot reading is used.
+ scoped_ptr<std::ifstream> file;
+ file.reset(
+ new std::ifstream(url.c_str(),
+ std::ios::in | std::ios::binary | std::ios::ate));
+ if (!file->good()) {
+ DLOG(ERROR) << "Failed to open file \"" << url << "\".";
+ return false;
+ }
+ file->seekg(0, std::ios::end);
+ uint32 length = file->tellg();
+ file->seekg(0, std::ios::beg);
+ mem_.reset(new uint8[length]);
+ DCHECK(mem_.get()); // Simply assumed to work on tester.
+ file->read(reinterpret_cast<char*>(mem_.get()), length);
+ file_length_ = length;
+ file->close();
+ return true;
+ }
+
+ // Reads next packet from the input stream.
+ // Returns number of read bytes on success, 0 on when there was no valid data
+ // to be read and -1 if user gave NULL or too small buffer.
+ // TODO(vmr): Modify to differentiate between errors and EOF.
+ int32 Read(uint8* target_mem, uint32 size) {
+ if (!target_mem)
+ return -1;
+ uint8* unit_begin = NULL;
+ uint8* unit_end = NULL;
+ uint8* ptr = mem_.get() + offset_;
+ while (offset_ + 4 < file_length_) {
+ if (ptr[0] == 0 && ptr[1] == 0 && ptr[2] == 0 && ptr[3] == 1) {
+ // start code found
+ if (!unit_begin) {
+ unit_begin = ptr;
+ } else {
+ // back-up 1 byte.
+ unit_end = ptr;
+ break;
+ }
+ }
+ ptr++;
+ offset_++;
+ }
+ if (unit_begin && offset_ + 4 == file_length_) {
+ // Last unit. Set the unit_end to point to the last byte.
+ unit_end = ptr + 4;
+ offset_ += 4;
+ } else if (!unit_begin || !unit_end) {
+ // No unit start codes found in buffer.
+ return 0;
+ }
+ if (static_cast<int32>(size) >= unit_end - unit_begin) {
+ memcpy(target_mem, unit_begin, unit_end - unit_begin);
+ return unit_end - unit_begin;
+ }
+ // Rewind to the beginning start code if there is one as it should be
+ // returned with next Read().
+ offset_ = unit_begin - mem_.get();
+ return -1;
+ }
+
+ private:
+ uint32 file_length_;
+ uint32 offset_;
+ scoped_array<uint8> mem_;
+};
+
+// Class for posting QuitTask to other message loop when observed message loop
+// is quitting. Observer must be added to the message loop by calling
+// AddDestructionObserver on the message loop to be tracked.
+class QuitObserver
+ : public MessageLoop::DestructionObserver {
+ public:
+ explicit QuitObserver(MessageLoop* loop_to_quit)
+ : loop_to_quit_(loop_to_quit) {
+ }
+ ~QuitObserver() {}
+
+ void WillDestroyCurrentMessageLoop() {
+ loop_to_quit_->PostTask(FROM_HERE, new MessageLoop::QuitTask());
+ }
+
+ protected:
+ MessageLoop* loop_to_quit_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(QuitObserver);
+};
+
+// FakeClient is a class that mimics normal operation from the client
+// process perspective. Underlying code will be receiving IPC commands from the
+// FakeClient and sending IPC commands back to the FakeClient just as the
+// underlying code would do with actual process running on top of it.
+class FakeClient
+ : public base::RefCountedThreadSafe<FakeClient>,
+ public IPC::Message::Sender,
+ public base::PlatformThread::Delegate {
+ public:
+ FakeClient(MessageLoop* message_loop_for_quit,
+ IPC::Channel::Listener* gpu_video_decode_accelerator_,
+ std::string video_source_filename)
+ : fake_gpu_process_(gpu_video_decode_accelerator_),
+ message_loop_for_quit_(message_loop_for_quit),
+ test_video_source_(),
+ assigned_buffer_count_(0),
+ end_of_stream_(false),
+ error_(false),
+ thread_initialized_event_(true, false), // Manual reset & unsignalled.
+ video_source_filename_(video_source_filename) {
+ // Start-up the thread for processing incoming IPC from GpuVideoDecoder and
+ // wait until it has finished setting up the message loop.
+ base::PlatformThread::Create(0, this, &thread_handle_);
+ while (!thread_initialized_event_.Wait()) {
+ // Do nothing but wait.
+ };
+
+ // Create 1Mb of shared memory.
+ shm_.reset(new base::SharedMemory());
+ CHECK(shm_->CreateAndMapAnonymous(1024 * 1024));
+ }
+
+ virtual ~FakeClient() {
+ DCHECK_EQ(assigned_buffer_count_, 0);
+ }
+
+ bool DispatchFirstDecode() {
+ if (!test_video_source_.Open(video_source_filename_)) {
+ return false;
+ }
+ message_loop_->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(this, &FakeClient::OnBitstreamBufferProcessed,
+ base::SharedMemory::NULLHandle()));
+ return true;
+ }
+
+ // PlatformThread::Delegate implementation, i.e. the thread where the
+ // GpuVideoDecoder IPC handling executes.
+ virtual void ThreadMain() {
+ message_loop_.reset(new MessageLoop());
+ // Set the test done observer to observe when client is done.
+ test_done_observer_.reset(new QuitObserver(message_loop_for_quit_));
+ message_loop_->AddDestructionObserver(test_done_observer_.get());
+ thread_initialized_event_.Signal();
+ message_loop_->Run();
+ message_loop_.reset(); // Destroy the message_loop_.
+ }
+
+ // IPC::Message::Sender implementation.
+ virtual bool Send(IPC::Message* msg) {
+ // Dispatch the message loops to the single message loop which we want to
+ // execute all the simulated IPC.
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(this,
+ &FakeClient::Send,
+ msg));
+ return true;
+ }
+ // This execution should happen in the context of our fake GPU thread.
+ CHECK(msg);
+ LogMessage(msg);
+ IPC_BEGIN_MESSAGE_MAP(FakeClient, *msg)
+ IPC_MESSAGE_HANDLER(
+ AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed,
+ OnBitstreamBufferProcessed)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers,
+ OnProvidePictureBuffers)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer,
+ OnDismissPictureBuffer)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_PictureReady,
+ OnPictureReady)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_FlushDone,
+ OnFlushDone)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_AbortDone,
+ OnAbortDone)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_EndOfStream,
+ OnEndOfStream)
+ IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderHostMsg_ErrorNotification,
+ OnError)
+ IPC_MESSAGE_UNHANDLED_ERROR()
+ IPC_END_MESSAGE_MAP()
+ delete msg;
+ return true;
+ }
+
+ virtual void OnBitstreamBufferProcessed(base::SharedMemoryHandle handle) {
+ // No action on end of stream.
+ if (end_of_stream_)
+ return;
+ uint32 read = test_video_source_.Read(
+ reinterpret_cast<uint8*>(shm_->memory()),
+ shm_->created_size());
+ if (read > 0) {
+ AcceleratedVideoDecoderMsg_Decode msg(kRouteId,
+ shm_->handle(),
+ 0,
+ read);
+ fake_gpu_process_->OnMessageReceived(msg);
+ } else if (read == 0) {
+ // If no more data, flush to get the rest out.
+ end_of_stream_ = true;
+ AcceleratedVideoDecoderMsg_Flush msg(kRouteId);
+ fake_gpu_process_->OnMessageReceived(msg);
+ } else {
+ // Error. Let's flush and abort.
+ error_ = true;
+ AcceleratedVideoDecoderMsg_Flush msg(kRouteId);
+ fake_gpu_process_->OnMessageReceived(msg);
+ }
+ }
+
+ virtual void OnProvidePictureBuffers(int32 num_frames,
+ std::vector<uint32> config) {
+ // Allocate and assign the picture buffers.
+ std::vector<uint32> textures;
+ // TODO(vmr): Get from config.
+ uint32 width = 320;
+ uint32 height = 240;
+ uint32 bits_per_pixel = 32;
+ for (int32 i = 0; i < num_frames; i++) {
+ // Create the shared memory, send it and store it into our local map.
+ base::SharedMemory* shm = new base::SharedMemory();
+ CHECK(shm->CreateAnonymous(width * height * bits_per_pixel / 8));
+ shm_map_[next_picture_buffer_id_] = shm;
+ AcceleratedVideoDecoderMsg_AssignPictureBuffer msg(
+ kRouteId,
+ next_picture_buffer_id_,
+ shm->handle(),
+ textures);
+ fake_gpu_process_->OnMessageReceived(msg);
+ next_picture_buffer_id_++;
+ assigned_buffer_count_++;
+ }
+ }
+
+ virtual void OnDismissPictureBuffer(int32 picture_buffer_id) {
+ // Free previously allocated buffers.
+ base::SharedMemory* shm = shm_map_[picture_buffer_id];
+ shm_map_.erase(picture_buffer_id);
+ delete shm; // Will also close shared memory.
+ assigned_buffer_count_--;
+ }
+
+ virtual void OnPictureReady(int32 picture_buffer_id) {
+ // Process & recycle picture buffer.
+ AcceleratedVideoDecoderMsg_ReusePictureBuffer msg(kRouteId,
+ picture_buffer_id);
+ fake_gpu_process_->OnMessageReceived(msg);
+ }
+
+ virtual void OnFlushDone() {
+ // TODO(vmr): Check that we had been actually flushing.
+ if (end_of_stream_ || error_) {
+ // Send the final Abort request.
+ AcceleratedVideoDecoderMsg_Abort msg(kRouteId);
+ fake_gpu_process_->OnMessageReceived(msg);
+ }
+ }
+
+ virtual void OnAbortDone() {
+ // Done aborting... case over.
+ message_loop_->QuitNow();
+ }
+
+ virtual void OnEndOfStream() {
+ end_of_stream_ = true;
+ AcceleratedVideoDecoderMsg_Flush msg(kRouteId);
+ fake_gpu_process_->OnMessageReceived(msg);
+ }
+
+ virtual void OnError(uint32 error_id) {
+ // Send the final Abort request.
+ AcceleratedVideoDecoderMsg_Abort msg(kRouteId);
+ fake_gpu_process_->OnMessageReceived(msg);
+ }
+
+ private:
+ // TODO(vmr): Remove redundant logging for IPC calls with proper Chromium
+ // logging facilities.
+  // TODO(vmr): Remove redundant logging for IPC calls with proper Chromium
+  // logging facilities.
+  // Logs which host message this client just received. The switch resolves
+  // the message type to its name; the emitted text is identical to logging
+  // each case separately.
+  void LogMessage(IPC::Message* msg) {
+    const char* name = "UNKNOWN MESSAGE";
+    switch (msg->type()) {
+      case AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed::ID:
+        name = "AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed";
+        break;
+      case AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers::ID:
+        name = "AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers";
+        break;
+      case AcceleratedVideoDecoderHostMsg_DismissPictureBuffer::ID:
+        name = "AcceleratedVideoDecoderHostMsg_DismissPictureBuffer";
+        break;
+      case AcceleratedVideoDecoderHostMsg_PictureReady::ID:
+        name = "AcceleratedVideoDecoderHostMsg_PictureReady";
+        break;
+      case AcceleratedVideoDecoderHostMsg_FlushDone::ID:
+        name = "AcceleratedVideoDecoderHostMsg_FlushDone";
+        break;
+      case AcceleratedVideoDecoderHostMsg_AbortDone::ID:
+        name = "AcceleratedVideoDecoderHostMsg_AbortDone";
+        break;
+      case AcceleratedVideoDecoderHostMsg_EndOfStream::ID:
+        name = "AcceleratedVideoDecoderHostMsg_EndOfStream";
+        break;
+      case AcceleratedVideoDecoderHostMsg_ErrorNotification::ID:
+        name = "AcceleratedVideoDecoderHostMsg_ErrorNotification";
+        break;
+      default:
+        break;
+    }
+    DLOG(INFO) << "CLIENT << " << name;
+  }
+
+ // Listener which should receive the messages we decide to generate.
+ IPC::Channel::Listener* fake_gpu_process_;
+ // Message loop into which we want to post any send messages.
+ scoped_ptr<MessageLoop> message_loop_;
+ // Message loop into which we want to post quit when we're done.
+ MessageLoop* message_loop_for_quit_;
+ // Test video source to read our data from.
+ TestVideoSource test_video_source_;
+ // SharedMemory used for input buffers.
+ scoped_ptr<base::SharedMemory> shm_;
+ // Incremental picture buffer id.
+ uint32 next_picture_buffer_id_;
+ // Counter to count assigned buffers.
+ int32 assigned_buffer_count_;
+ // Counter to count locked buffers.
+ uint32 locked_buffer_count_;
+ // Flag to determine whether we have received end of stream from decoder.
+ bool end_of_stream_;
+ // Flag to determine whether we have faced an error.
+ bool error_;
+ // Event to determine the initialization state of the fake client thread.
+ base::WaitableEvent thread_initialized_event_;
+ // We own the observer used to track messageloop destruction.
+ scoped_ptr<QuitObserver> test_done_observer_;
+ // Handle to the thread.
+ base::PlatformThreadHandle thread_handle_;
+ // File name for video.
+ std::string video_source_filename_;
+ // Map to hold the picture buffer objects we create.
+ std::map<int32, base::SharedMemory*> shm_map_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FakeClient);
+};
+
+// Class to fake the regular behaviour of video decode accelerator.
+// Class to fake the regular behaviour of video decode accelerator.
+class FakeVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
+ public:
+  // |client| is notified of decoder events. Not owned.
+  explicit FakeVideoDecodeAccelerator(
+      media::VideoDecodeAccelerator::Client* client)
+      : state_(IDLE),
+        end_of_stream_(false),
+        client_(client),
+        output_counter_(0) {
+  }
+  virtual ~FakeVideoDecodeAccelerator() {}
+
+  // Returns the configuration this fake holds; |prototype_config| is ignored.
+  virtual const std::vector<uint32>& GetConfig(
+      const std::vector<uint32>& prototype_config) {
+    return config_;
+  }
+
+  // Accepts only a configuration equal to the stored one (empty by default).
+  virtual bool Initialize(const std::vector<uint32>& config) {
+    return config_ == config;
+  }
+
+  // |bitstream_buffer| is owned by client and client guarantees it will not
+  // release it before |callback| is called.
+  virtual bool Decode(media::BitstreamBuffer* bitstream_buffer,
+                      media::VideoDecodeAcceleratorCallback* callback) {
+    if (assigned_picture_buffers_.empty()) {
+      // No output buffers yet; park |callback| until the client has assigned
+      // enough picture buffers (released in AssignPictureBuffer()).
+      std::vector<uint32> cfg;
+      bitstream_buffer_cb_.reset(callback);
+      client_->ProvidePictureBuffers(2, cfg);
+      return true;
+    }
+    // From here on we own |callback|; the scoper guarantees it is released on
+    // every path (it previously leaked once end of stream had been reached).
+    scoped_ptr<media::VideoDecodeAcceleratorCallback> done_cb(callback);
+    // Simulate the bitstream processed callback.
+    if (!end_of_stream_) {
+      done_cb->Run();
+    }
+    // Then call the picture ready.
+    // TODO(vmr): Add container for locked picture buffers.
+    if (output_counter_ > 300) {
+      // Simulated stream length reached; signal end of stream exactly once.
+      if (!end_of_stream_) {
+        client_->NotifyEndOfStream();
+      }
+      end_of_stream_ = true;
+      return true;
+    }
+    // Downcast to the concrete media::PictureBuffer; static_cast is the
+    // correct (and checkable) cast for this known class hierarchy.
+    media::PictureBuffer* picture_buffer =
+        static_cast<media::PictureBuffer*>(assigned_picture_buffers_.at(
+            output_counter_ % assigned_picture_buffers_.size()));
+    // TODO(vmr): Get the real values.
+    gfx::Size size(320, 240);
+    std::vector<uint32> color_format;
+    // TODO(vmr): Add the correct mechanism to recycle buffers from assigned
+    // to locked and back.
+    client_->PictureReady(
+        new media::Picture(picture_buffer, size, size, NULL));
+    output_counter_++;
+    return true;
+  }
+
+  // Accumulates |picture_buffers|; once enough are assigned, transitions to
+  // OPERATIONAL and releases the parked Decode() callback.
+  virtual void AssignPictureBuffer(
+      std::vector<media::VideoDecodeAccelerator::PictureBuffer*>
+          picture_buffers) {
+    assigned_picture_buffers_.insert(assigned_picture_buffers_.begin(),
+                                     picture_buffers.begin(),
+                                     picture_buffers.end());
+    if (EnoughPictureBuffers()) {
+      ChangeState(OPERATIONAL);
+      bitstream_buffer_cb_->Run();
+      bitstream_buffer_cb_.reset();
+    }
+  }
+
+  virtual void ReusePictureBuffer(
+      media::VideoDecodeAccelerator::PictureBuffer* picture_buffer) {
+    // TODO(vmr): Move the picture buffer from locked picture buffer container
+    // to the assigned picture buffer container.
+  }
+
+  // Flush request is accepted only in OPERATIONAL state. Takes ownership of
+  // |callback| and runs it synchronously on success.
+  virtual bool Flush(media::VideoDecodeAcceleratorCallback* callback) {
+    scoped_ptr<media::VideoDecodeAcceleratorCallback> flush_cb(callback);
+    if (state_ != OPERATIONAL) {
+      return false;
+    }
+    ChangeState(FLUSHING);
+    ChangeState(OPERATIONAL);
+    flush_cb->Run();
+    return true;
+  }
+
+  // Abort request is accepted in all states except UNINITIALIZED and when an
+  // abort is already in progress. Takes ownership of |callback|.
+  virtual bool Abort(media::VideoDecodeAcceleratorCallback* callback) {
+    scoped_ptr<media::VideoDecodeAcceleratorCallback> abort_cb(callback);
+    if (state_ == UNINITIALIZED || state_ == ABORTING) {
+      return false;
+    }
+    ChangeState(ABORTING);
+    // Stop the component here.
+    // As buffers are released callback for each buffer DismissBuffer.
+    while (!assigned_picture_buffers_.empty()) {
+      client_->DismissPictureBuffer(assigned_picture_buffers_.back());
+      assigned_picture_buffers_.pop_back();
+    }
+    ChangeState(IDLE);
+    abort_cb->Run();
+    return true;
+  }
+
+ private:
+  // Decoder lifecycle states; names are mirrored in kStateString for logging.
+  enum DecodingState {
+    UNINITIALIZED, // Component has not been configured.
+    IDLE, // Component has been initialized but does not have needed resources.
+    OPERATIONAL, // Component is operational with all resources assigned.
+    FLUSHING, // Component is flushing.
+    ABORTING, // Component is aborting.
+  } state_;
+
+  static const char* kStateString[5];
+
+  // Logs and records a decoder state transition.
+  void ChangeState(DecodingState new_state) {
+    DLOG(INFO) << "VideoDecodeAccelerator state change: "
+               << kStateString[state_] << " => " << kStateString[new_state];
+    state_ = new_state;
+  }
+
+  // True once the client has assigned the minimum number of output buffers.
+  bool EnoughPictureBuffers() {
+    return assigned_picture_buffers_.size() >= 2;
+  }
+
+  // Flag telling whether simulated end of stream has been reached.
+  bool end_of_stream_;
+  // Configuration accepted by Initialize(); empty by default.
+  std::vector<uint32> config_;
+  // Output buffers currently assigned to (usable by) the decoder.
+  std::vector<media::VideoDecodeAccelerator::PictureBuffer*>
+      assigned_picture_buffers_;
+  // Output buffers currently locked by the client. Unused for now (see the
+  // TODOs above); renamed with the trailing underscore all members use.
+  std::vector<media::VideoDecodeAccelerator::PictureBuffer*>
+      locked_picture_buffers_;
+  // Client to notify about decoder events. Not owned.
+  media::VideoDecodeAccelerator::Client* client_;
+  // Parked Decode() callback waiting for picture buffer assignment.
+  scoped_ptr<media::VideoDecodeAcceleratorCallback> bitstream_buffer_cb_;
+  // Number of pictures output so far.
+  uint32 output_counter_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FakeVideoDecodeAccelerator);
+};
+
+// Human-readable names for FakeVideoDecodeAccelerator::DecodingState values;
+// order must match the DecodingState enumerator order.
+const char* FakeVideoDecodeAccelerator::kStateString[5] = {
+  "UNINITIALIZED",
+  "IDLE",
+  "OPERATIONAL",
+  "FLUSHING",
+  "ABORTING",
+};
+
+// Class to simulate the GpuThread. It will simply simulate the listener
+// interface to GPU video decoder by making sure they get ran in our simulated
+// GpuThread's context. The thread where loop is created will be used to
+// simulate the GpuThread in the actual gpu process environment. User is
+// responsible for calling the Run() when it is ready to start processing the
+// input messages. Calling Run() will block until QuitTask has been posted to
+// the message loop.
+class FakeGpuThread
+    : public base::RefCountedThreadSafe<FakeGpuThread>,
+      public IPC::Channel::Listener {
+ public:
+  // |listener| receives all dispatched messages on this object's message
+  // loop. Not owned; must outlive this object.
+  explicit FakeGpuThread(IPC::Channel::Listener* listener)
+      : listener_(listener) {
+    message_loop_.reset(new MessageLoop());
+  }
+
+  // Run will run the message loop. Blocks until a quit is posted.
+  void Run() {
+    message_loop_->Run();
+  }
+  // Returns pointer to the message loop this class has initialized.
+  MessageLoop* message_loop() {
+    return message_loop_.get();
+  }
+
+  // IPC::Channel::Listener implementation.
+  // If invoked from any other thread, re-posts itself to |message_loop_| and
+  // reports the message as handled; the actual dispatch to |listener_| then
+  // happens on the simulated GPU thread.
+  bool OnMessageReceived(const IPC::Message& message) {
+    // Dispatch the message loops to the single message loop which we want to
+    // execute all the simulated IPC.
+    if (MessageLoop::current() != message_loop_.get()) {
+      message_loop_->PostTask(
+          FROM_HERE,
+          NewRunnableMethod(
+              this,
+              &FakeGpuThread::OnMessageReceived,
+              message));
+      return true;
+    }
+    LogMessage(message);
+    return listener_->OnMessageReceived(message);
+  }
+  // Forwards the connect notification, hopping to |message_loop_| if needed.
+  void OnChannelConnected(int32 peer_pid) {
+    // Dispatch the message loops to the single message loop which we want to
+    // execute all the simulated IPC.
+    if (MessageLoop::current() != message_loop_.get()) {
+      message_loop_->PostTask(
+          FROM_HERE,
+          NewRunnableMethod(
+              this,
+              &FakeGpuThread::OnChannelConnected,
+              peer_pid));
+      return;
+    }
+    listener_->OnChannelConnected(peer_pid);
+  }
+  // Forwards the error notification, hopping to |message_loop_| if needed.
+  void OnChannelError() {
+    // Dispatch the message loops to the single message loop which we want to
+    // execute all the simulated IPC.
+    if (MessageLoop::current() != message_loop_.get()) {
+      message_loop_->PostTask(
+          FROM_HERE,
+          NewRunnableMethod(
+              this,
+              &FakeGpuThread::OnChannelError));
+      return;
+    }
+    listener_->OnChannelError();
+  }
+
+ private:
+  // TODO(vmr): Use proper Chrome IPC logging instead.
+  // Logs which decoder message the simulated GPU thread is about to dispatch.
+  void LogMessage(const IPC::Message& msg) {
+    switch (msg.type()) {
+      case AcceleratedVideoDecoderMsg_GetConfigs::ID:
+        DLOG(INFO) << "GPU << AcceleratedVideoDecoderMsg_GetConfigs";
+        break;
+      case AcceleratedVideoDecoderMsg_Create::ID:
+        DLOG(INFO) << "GPU << AcceleratedVideoDecoderMsg_Create";
+        break;
+      case AcceleratedVideoDecoderMsg_Decode::ID:
+        DLOG(INFO) << "GPU << AcceleratedVideoDecoderMsg_Decode";
+        break;
+      case AcceleratedVideoDecoderMsg_AssignPictureBuffer::ID:
+        DLOG(INFO) << "GPU << AcceleratedVideoDecoderMsg_AssignPictureBuffer";
+        break;
+      case AcceleratedVideoDecoderMsg_ReusePictureBuffer::ID:
+        DLOG(INFO) << "GPU << AcceleratedVideoDecoderMsg_ReusePictureBuffer";
+        break;
+      case AcceleratedVideoDecoderMsg_Flush::ID:
+        DLOG(INFO) << "GPU << AcceleratedVideoDecoderMsg_Flush";
+        break;
+      case AcceleratedVideoDecoderMsg_Abort::ID:
+        DLOG(INFO) << "GPU << AcceleratedVideoDecoderMsg_Abort";
+        break;
+      case AcceleratedVideoDecoderMsg_Destroy::ID:
+        DLOG(INFO) << "GPU << AcceleratedVideoDecoderMsg_Destroy";
+        break;
+      default:
+        DLOG(INFO) << "GPU << UNKNOWN MESSAGE";
+        break;
+    }
+  }
+
+  // Pointer to the listener where the messages are to be dispatched.
+  IPC::Channel::Listener* listener_;
+  // MessageLoop where the GpuVideoDecodeAccelerator execution is supposed to
+  // run in.
+  scoped_ptr<MessageLoop> message_loop_;
+
+  // To make sure that only scoped_refptr can release us.
+  friend class base::RefCountedThreadSafe<FakeGpuThread>;
+  virtual ~FakeGpuThread() {}
+};
+
+// GpuVideoDecodeAcceleratorTest is the main test class that owns all the other
+// objects and does global setup and teardown.
+class GpuVideoDecodeAcceleratorTest
+    : public ::testing::TestWithParam<const char*>,
+      public IPC::Channel::Listener {
+ public:
+  GpuVideoDecodeAcceleratorTest() {}
+  virtual ~GpuVideoDecodeAcceleratorTest() {}
+
+  // Functions for GUnit test fixture setups.
+  virtual void SetUp() {
+    // Sets up the message loop pretending the fake client.
+    SetUpFakeClientAndGpuThread();
+
+    // Initialize the GPU video decode accelerator with the mock underlying
+    // accelerator.
+    gpu_video_decode_accelerator_ =
+        new GpuVideoDecodeAccelerator(fake_client_.get(), kRouteId);
+    gpu_video_decode_accelerator_->set_video_decode_accelerator(
+        &mock_video_decode_accelerator_);
+    // Set-up the underlying video decode accelerator.
+    video_decode_accelerator_.reset(
+        new FakeVideoDecodeAccelerator(gpu_video_decode_accelerator_.get()));
+    std::vector<uint32> config;
+    video_decode_accelerator_->Initialize(config);
+
+    // Set up the default mock behaviour.
+    SetUpDefaultMockGpuChannelDelegation();
+    SetUpMockVideoDecodeAcceleratorDelegation();
+  }
+
+  // GoogleTest spells this hook TearDown() (capital D); the previous
+  // Teardown() did not override it and was never invoked by the framework.
+  virtual void TearDown() {}
+
+  // Creates the FakeGpuThread (which dispatches channel messages to this
+  // fixture) and the FakeClient driven by the test parameter's input file.
+  void SetUpFakeClientAndGpuThread() {
+    // Create the message loop for the IO thread (current thread).
+    // Also implements passing channel messages automatically to this thread.
+    fake_gpu_thread_ = new FakeGpuThread(this);
+
+    // Initialize the fake client to inform our io message loop and use the
+    // fresh FakeGpuThread object to send the simulated IPC to the decoder.
+    fake_client_ = new FakeClient(
+        fake_gpu_thread_->message_loop(), fake_gpu_thread_.get(),
+        GetParam());
+  }
+
+  void SetUpDefaultMockGpuChannelDelegation() {
+    // Set the MockGpuChannel to call by default always the FakeClient when
+    // anything is called from the GpuChannel. This will simulate default flow
+    // of the video decoding.
+    ON_CALL(mock_gpu_channel_, Send(_))
+        .WillByDefault(Invoke(fake_client_.get(), &FakeClient::Send));
+  }
+
+  void SetUpMockVideoDecodeAcceleratorDelegation() {
+    // Set the MockVideoDecodeAccelerator to call by the used decode
+    // accelerator.
+    // Default builds against fake.
+    ON_CALL(mock_video_decode_accelerator_, GetConfig(_))
+        .WillByDefault(Invoke(video_decode_accelerator_.get(),
+                              &FakeVideoDecodeAccelerator::GetConfig));
+    ON_CALL(mock_video_decode_accelerator_, Initialize(_))
+        .WillByDefault(Invoke(video_decode_accelerator_.get(),
+                              &FakeVideoDecodeAccelerator::Initialize));
+    ON_CALL(mock_video_decode_accelerator_, Decode(_, _))
+        .WillByDefault(Invoke(video_decode_accelerator_.get(),
+                              &FakeVideoDecodeAccelerator::Decode));
+    ON_CALL(mock_video_decode_accelerator_, AssignPictureBuffer(_))
+        .WillByDefault(Invoke(video_decode_accelerator_.get(),
+                              &FakeVideoDecodeAccelerator::AssignPictureBuffer));
+    ON_CALL(mock_video_decode_accelerator_, ReusePictureBuffer(_))
+        .WillByDefault(Invoke(video_decode_accelerator_.get(),
+                              &FakeVideoDecodeAccelerator::ReusePictureBuffer));
+    ON_CALL(mock_video_decode_accelerator_, Flush(_))
+        .WillByDefault(Invoke(video_decode_accelerator_.get(),
+                              &FakeVideoDecodeAccelerator::Flush));
+    ON_CALL(mock_video_decode_accelerator_, Abort(_))
+        .WillByDefault(Invoke(video_decode_accelerator_.get(),
+                              &FakeVideoDecodeAccelerator::Abort));
+  }
+
+  // IPC::Channel::Listener implementation. Everything is forwarded straight
+  // to the GpuVideoDecodeAccelerator under test.
+  bool OnMessageReceived(const IPC::Message& message) {
+    DCHECK(gpu_video_decode_accelerator_.get());
+    return gpu_video_decode_accelerator_->OnMessageReceived(message);
+  }
+  void OnChannelConnected(int32 peer_pid) {
+    DCHECK(gpu_video_decode_accelerator_.get());
+    gpu_video_decode_accelerator_->OnChannelConnected(peer_pid);
+  }
+  void OnChannelError() {
+    DCHECK(gpu_video_decode_accelerator_.get());
+    gpu_video_decode_accelerator_->OnChannelError();
+  }
+
+  // Threading related functions. Blocks until the fake client quits the loop.
+  void RunMessageLoop() {
+    fake_gpu_thread_->Run();
+  }
+
+ protected:
+  // We need exit manager to please the message loops we're creating.
+  base::AtExitManager exit_manager_;
+  // Mock and fake delegate for the IPC channel.
+  NiceMock<MockGpuChannel> mock_gpu_channel_;
+  // Reference counted pointer to the gpu channel.
+  scoped_refptr<FakeClient> fake_client_;
+  // Reference counted pointer to the fake gpu channel.
+  scoped_refptr<FakeGpuThread> fake_gpu_thread_;
+  // Handle to the initialized GpuVideoDecodeAccelerator. Tester owns the
+  // GpuVideoDecodeAccelerator.
+  scoped_refptr<GpuVideoDecodeAccelerator> gpu_video_decode_accelerator_;
+  // Mock and default fake delegate for the underlying video decode accelerator.
+  NiceMock<MockVideoDecodeAccelerator> mock_video_decode_accelerator_;
+  scoped_ptr<FakeVideoDecodeAccelerator> video_decode_accelerator_;
+};
+
+// Immutable configuration tables; const-qualified since nothing may mutate
+// them after static initialization.
+static const uint32 kTestH264BitstreamConfig[] = {
+  // Intentionally breaking formatting rules to make things more readable.
+  media::VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_FOURCC,
+      media::VIDEOCODECFOURCC_H264,
+  media::VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_PROFILE,
+      media::H264PROFILE_BASELINE,
+  media::VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_LEVEL,
+      media::H264LEVEL_30,
+  media::VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_PAYLOADFORMAT,
+      media::H264PAYLOADFORMAT_BYTESTREAM,
+  media::VIDEOATTRIBUTEKEY_TERMINATOR
+};
+
+static const uint32 kTestH264BitstreamConfigCount =
+    sizeof(kTestH264BitstreamConfig) / sizeof(kTestH264BitstreamConfig[0]);
+
+static std::vector<uint32> kTestH264BitstreamConfigVector(
+    kTestH264BitstreamConfig,
+    kTestH264BitstreamConfig + kTestH264BitstreamConfigCount);
+
+// 16-bit RGB 565 picture buffer color configuration.
+static const uint32 kTestColorConfig[] = {
+  media::VIDEOATTRIBUTEKEY_COLORFORMAT_PLANE_PIXEL_SIZE, 16,
+  media::VIDEOATTRIBUTEKEY_COLORFORMAT_RED_SIZE, 5,
+  media::VIDEOATTRIBUTEKEY_COLORFORMAT_GREEN_SIZE, 6,
+  media::VIDEOATTRIBUTEKEY_COLORFORMAT_BLUE_SIZE, 5,
+  media::VIDEOATTRIBUTEKEY_TERMINATOR
+};
+
+static const uint32 kTestColorConfigCount =
+    sizeof(kTestColorConfig) / sizeof(kTestColorConfig[0]);
+
+static std::vector<uint32> kTestColorConfigVector(
+    kTestColorConfig, kTestColorConfig + kTestColorConfigCount);
+
+// Matches any IPC message carrying an error notification from the decoder;
+// intended for asserting that no unexpected errors reach the client.
+MATCHER(ErrorMatcher, std::string("Decoder reported unexpected error")) {
+  return arg->type() == AcceleratedVideoDecoderHostMsg_ErrorNotification::ID;
+}
+
+// Parameterized over input file names (see INSTANTIATE_TEST_CASE_P below).
+// Kicks off the first Decode and then lets the fake client / fake decoder
+// drive the whole flow until the client quits the message loop.
+TEST_P(GpuVideoDecodeAcceleratorTest, RegularDecodingFlow) {
+  // We will carry out the creation of video decoder manually with specified
+  // configuration which we know will work. The rest of the functionality is
+  // asynchronous and is designed to be tested with this test bench.
+
+  // TODO(vmr): Remove the first decode from here after proper init.
+  if (!fake_client_->DispatchFirstDecode()) {
+    FAIL() << "Failed to open input file";
+    return;
+  }
+
+  // TODO(vmr): Verify how we are going to not allow any sending of data.
+  // EXPECT_CALL(mock_gpu_channel_, Send(ErrorMatcher()));
+  //     .Times(0);
+
+  RunMessageLoop();
+  // Once message loop has finished our case is over.
+}
+
+// Input bitstream files (relative to the source root) used to instantiate the
+// parameterized test above.
+const char* kFileNames[] = {
+  "media/test/data/test-25fps.h264",
+};
+
+INSTANTIATE_TEST_CASE_P(RegularDecodingFlowWithFile,
+                        GpuVideoDecodeAcceleratorTest,
+                        ::testing::ValuesIn(kFileNames));
+
+int main(int argc, char **argv) {
+  // TODO(vmr): Integrate with existing unit test targets.
+  // This binary uses Google Mock (NiceMock/ON_CALL in the fixture), so
+  // initialize it; InitGoogleMock also initializes Google Test.
+  testing::InitGoogleMock(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/media/media.gyp b/media/media.gyp
index 23e1956..60cffe2 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -172,6 +172,7 @@
'video/ffmpeg_video_allocator.h',
'video/ffmpeg_video_decode_engine.cc',
'video/ffmpeg_video_decode_engine.h',
+ 'video/video_decode_accelerator.cc',
'video/video_decode_accelerator.h',
'video/video_decode_engine.cc',
'video/video_decode_engine.h',
diff --git a/media/video/picture.cc b/media/video/picture.cc
new file mode 100644
index 0000000..dd57e4a
--- /dev/null
+++ b/media/video/picture.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/picture.h"
+
+namespace media {
+
+// PictureBuffer implementation.
+// Copies all supplied metadata and plane handles into the buffer object.
+PictureBuffer::PictureBuffer(int32 id,
+                             gfx::Size pixel_size,
+                             std::vector<uint32> color_format,
+                             MemoryType memory_type,
+                             std::vector<DataPlaneHandle> data_plane_handles)
+    : id_(id),
+      pixel_size_(pixel_size),
+      color_format_(color_format),
+      memory_type_(memory_type),
+      data_plane_handles_(data_plane_handles) {
+}
+
+PictureBuffer::~PictureBuffer() {}
+
+// Returns the client-chosen identifier for this buffer.
+int32 PictureBuffer::GetId() {
+  return id_;
+}
+
+// Returns the buffer dimensions in pixels.
+gfx::Size PictureBuffer::GetSize() {
+  return pixel_size_;
+}
+
+// Returns the color format key/value attribute list (see VideoAttributeKey).
+const std::vector<uint32>& PictureBuffer::GetColorFormat() {
+  return color_format_;
+}
+
+PictureBuffer::MemoryType PictureBuffer::GetMemoryType() {
+  return memory_type_;
+}
+
+// Returns the handles to the data planes backing this buffer.
+std::vector<PictureBuffer::DataPlaneHandle>& PictureBuffer::GetPlaneHandles() {
+  return data_plane_handles_;
+}
+
+// Picture implementation.
+// |picture_buffer| is the buffer containing this picture; not owned here.
+Picture::Picture(PictureBuffer* picture_buffer, gfx::Size decoded_pixel_size,
+                 gfx::Size visible_pixel_size, void* user_handle)
+    : picture_buffer_(picture_buffer),
+      decoded_pixel_size_(decoded_pixel_size),
+      visible_pixel_size_(visible_pixel_size),
+      user_handle_(user_handle) {
+}
+
+Picture::~Picture() {}
+
+PictureBuffer* Picture::picture_buffer() {
+  return picture_buffer_;
+}
+
+gfx::Size Picture::GetDecodedSize() const {
+  return decoded_pixel_size_;
+}
+
+gfx::Size Picture::GetVisibleSize() const {
+  return visible_pixel_size_;
+}
+
+// Returns the opaque client-supplied handle associated with this picture.
+void* Picture::GetUserHandle() {
+  return user_handle_;
+}
+
+}  // namespace media
+
diff --git a/media/video/picture.h b/media/video/picture.h
new file mode 100644
index 0000000..ec84453
--- /dev/null
+++ b/media/video/picture.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_PICTURE_H_
+#define MEDIA_VIDEO_PICTURE_H_
+
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "media/video/video_decode_accelerator.h"
+
+namespace media {
+
+// TODO(vmr): Evaluate the generalization potential of these interfaces &
+// classes and refactor as needed with the rest of media stack.
+// Concrete picture buffer; stores copies of all the metadata supplied at
+// construction time.
+class PictureBuffer : public VideoDecodeAccelerator::PictureBuffer {
+ public:
+  // |data_plane_handles| is copied into the object.
+  PictureBuffer(int32 id, gfx::Size pixel_size,
+                std::vector<uint32> color_format, MemoryType memory_type,
+                std::vector<DataPlaneHandle> data_plane_handles);
+  virtual ~PictureBuffer();
+
+  // VideoDecodeAccelerator::PictureBuffer implementation.
+  virtual int32 GetId() OVERRIDE;
+  virtual gfx::Size GetSize() OVERRIDE;
+  virtual const std::vector<uint32>& GetColorFormat() OVERRIDE;
+  virtual MemoryType GetMemoryType() OVERRIDE;
+  virtual std::vector<DataPlaneHandle>& GetPlaneHandles() OVERRIDE;
+
+ private:
+  int32 id_;
+  gfx::Size pixel_size_;
+  std::vector<uint32> color_format_;
+  MemoryType memory_type_;
+  // Stored by value: the constructor receives the vector by value, so a
+  // reference member (as previously declared) would bind to the constructor
+  // parameter and dangle as soon as construction completes.
+  std::vector<DataPlaneHandle> data_plane_handles_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PictureBuffer);
+};
+
+// Concrete decoded picture; refers to (but does not own) the PictureBuffer
+// it was decoded into.
+class Picture : public VideoDecodeAccelerator::Picture {
+ public:
+  Picture(PictureBuffer* picture_buffer, gfx::Size decoded_pixel_size,
+          gfx::Size visible_pixel_size, void* user_handle);
+  virtual ~Picture();
+
+  // VideoDecodeAccelerator::Picture implementation.
+  virtual PictureBuffer* picture_buffer() OVERRIDE;
+  virtual gfx::Size GetDecodedSize() const OVERRIDE;
+  virtual gfx::Size GetVisibleSize() const OVERRIDE;
+  virtual void* GetUserHandle() OVERRIDE;
+
+ private:
+  // Pointer to the picture buffer which contains this picture. Not owned.
+  PictureBuffer* picture_buffer_;
+  gfx::Size decoded_pixel_size_;
+  gfx::Size visible_pixel_size_;
+  // Opaque client-supplied handle carried through unchanged.
+  void* user_handle_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Picture);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_PICTURE_H_
diff --git a/media/video/video_decode_accelerator.cc b/media/video/video_decode_accelerator.cc
new file mode 100644
index 0000000..c88d48a
--- /dev/null
+++ b/media/video/video_decode_accelerator.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/video_decode_accelerator.h"
+
+namespace media {
+
+// Out-of-line destructor definitions; these anchor the classes' vtables to
+// this translation unit.
+VideoDecodeAccelerator::~VideoDecodeAccelerator() {}
+
+VideoDecodeAccelerator::PictureBuffer::~PictureBuffer() {}
+
+VideoDecodeAccelerator::Picture::~Picture() {}
+
+}  // namespace media
diff --git a/media/video/video_decode_accelerator.h b/media/video/video_decode_accelerator.h
index 0c4405f..be799c11 100644
--- a/media/video/video_decode_accelerator.h
+++ b/media/video/video_decode_accelerator.h
@@ -16,34 +16,260 @@ namespace media {
typedef Callback0::Type VideoDecodeAcceleratorCallback;
+// Enumeration defining global dictionary ranges for various purposes that are
+// used to handle the configurations of the video decoder.
+enum VideoAttributeKey {
+  VIDEOATTRIBUTEKEY_TERMINATOR = 0,
+
+  VIDEOATTRIBUTEKEY_BITSTREAM_FORMAT_BASE = 0x100,
+  // Array of key/value pairs describing video configuration.
+  // It could include any keys from PP_VideoKey. Its last element shall be
+  // VIDEOATTRIBUTEKEY_TERMINATOR with no corresponding value.
+  // An example:
+  // {
+  //   VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_FOURCC, PP_VIDEODECODECID_VP8,
+  //   VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_VP8_PROFILE, (VP8PROFILE_1 |
+  //                                                   VP8PROFILE_2 |
+  //                                                   VP8PROFILE_3),
+  //   VIDEOATTRIBUTEKEY_TERMINATOR
+  // };
+  // Keys for defining video bitstream format.
+  // Value is type of PP_VideoCodecFourcc. Commonly known attributes values are
+  // defined in PP_VideoCodecFourcc enumeration.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_FOURCC,
+  // Bitrate in bits/s. Attribute value is 32-bit unsigned integer.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_BITRATE,
+  // Width and height of the input video bitstream, if known by the application.
+  // Decoder will expect the bitstream to match these values and does memory
+  // considerations accordingly.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_WIDTH,
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_HEIGHT,
+  // Following attributes are applicable only in case of VP8.
+  // Key for VP8 profile attribute. Attribute value is bitmask of flags defined
+  // in PP_VP8Profile_Dev enumeration.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_VP8_PROFILE,
+  // Number of partitions per picture. Attribute value is unsigned 32-bit
+  // integer.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_VP8_NUM_OF_PARTITIONS,
+  // Following attributes are applicable only in case of H.264.
+  // Value is bitmask collection from the flags defined in PP_H264Profile.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_PROFILE,
+  // Value is type of PP_H264Level.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_LEVEL,
+  // Value is type of PP_H264PayloadFormat_Dev.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_PAYLOADFORMAT,
+  // Subset for H.264 features, attribute value 0 signifies unsupported.
+  // This is needed in case decoder has partial support for certain profile.
+  // Default for features are enabled if they're part of supported profile.
+  // H264 tool called Flexible Macroblock Ordering.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_FEATURE_FMO,
+  // H264 tool called Arbitrary Slice Ordering.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_FEATURE_ASO,
+  // H264 tool called Interlacing.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_FEATURE_INTERLACE,
+  // H264 tool called Context-Adaptive Binary Arithmetic Coding.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_FEATURE_CABAC,
+  // H264 tool called Weighted Prediction.
+  VIDEOATTRIBUTEKEY_BITSTREAMFORMAT_H264_FEATURE_WEIGHTEDPREDICTION,
+
+  VIDEOATTRIBUTEKEY_COLORFORMAT_BASE = 0x1000,
+  // Keys for defining attributes of a color buffer. Using these attributes
+  // users can define color spaces in terms of red, green, blue and alpha
+  // components as well as with combination of luma and chroma values with
+  // different subsampling schemes. Also planar, semiplanar and interleaved
+  // formats can be described by using the provided keys as instructed.
+  //
+  // Rules for describing the color planes (1 or more) that constitute the whole
+  // picture are:
+  //   1. Each plane starts with VIDEOATTRIBUTEKEY_COLORFORMAT_PLANE_PIXEL_SIZE
+  //      attribute telling how many bits per pixel the plane contains.
+  //   2. VIDEOATTRIBUTEKEY_COLORFORMAT_PLANE_PIXEL_SIZE attribute must be
+  //      followed either by
+  //        a. Red, green and blue components followed optionally by alpha size
+  //           attribute.
+  //        OR
+  //        b. Luma, blue difference chroma and red difference chroma
+  //           components as well as three sampling reference factors that tell
+  //           how the chroma may have been subsampled with respect to luma.
+  //   3. Description must be terminated with VIDEOATTRIBUTEKEY_TERMINATOR
+  //      key with no value for attribute.
+  //
+  // For example, semiplanar YCbCr 4:2:2 (2 planes, one containing 8-bit luma,
+  // the other containing two interleaved chroma data components) may be
+  // described with the following attributes:
+  // {
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_PLANE_PIXEL_SIZE, 8,
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_LUMA_SIZE, 8,
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_PLANE_PIXEL_SIZE, 16,
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_CHROMA_BLUE_SIZE, 8,
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_CHROMA_RED_SIZE, 8,
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_HORIZONTAL_SAMPLING_FACTOR_REFERENCE, 4,
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_CHROMA_HORIZONTAL_SUBSAMPLING_FACTOR, 2,
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_CHROMA_VERTICAL_SUBSAMPLING_FACTOR, 2
+  //   VIDEOATTRIBUTEKEY_TERMINATOR
+  // }
+  //
+  // Another example, commonly known 16-bit RGB 565 color format may be
+  // specified as follows:
+  // {
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_PLANE_PIXEL_SIZE, 16,
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_RED_SIZE, 5,
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_GREEN_SIZE, 6,
+  //   VIDEOATTRIBUTEKEY_COLORFORMAT_BLUE_SIZE, 5,
+  //   VIDEOATTRIBUTEKEY_TERMINATOR
+  // }
+  // Total color component bits per pixel in the picture buffer.
+  VIDEOATTRIBUTEKEY_COLORFORMAT_PLANE_PIXEL_SIZE,
+  // Bits of red per pixel in picture buffer.
+  VIDEOATTRIBUTEKEY_COLORFORMAT_RED_SIZE,
+  // Bits of green per pixel in picture buffer.
+  VIDEOATTRIBUTEKEY_COLORFORMAT_GREEN_SIZE,
+  // Bits of blue per pixel in picture buffer.
+  VIDEOATTRIBUTEKEY_COLORFORMAT_BLUE_SIZE,
+  // Bits of alpha in color buffer.
+  VIDEOATTRIBUTEKEY_COLORFORMAT_ALPHA_SIZE,
+  // Bits of luma per pixel in color buffer.
+  VIDEOATTRIBUTEKEY_COLORFORMAT_LUMA_SIZE,
+  // Bits of blue difference chroma (Cb) data in color buffer.
+  VIDEOATTRIBUTEKEY_COLORFORMAT_CHROMA_BLUE_SIZE,
+  // Bits of red difference chroma (Cr) data in color buffer.
+  VIDEOATTRIBUTEKEY_COLORFORMAT_CHROMA_RED_SIZE,
+  // Three keys to describe the subsampling of YCbCr sampled digital video
+  // signal. For example, 4:2:2 sampling could be defined by setting:
+  // VIDEOATTRIBUTEKEY_COLORFORMAT_HORIZONTAL_SAMPLING_FACTOR_REFERENCE = 4
+  // VIDEOATTRIBUTEKEY_COLORFORMAT_CHROMA_HORIZONTAL_SUBSAMPLING_FACTOR = 2
+  // VIDEOATTRIBUTEKEY_COLORFORMAT_CHROMA_VERTICAL_SUBSAMPLING_FACTOR = 2
+  VIDEOATTRIBUTEKEY_COLORFORMAT_HORIZONTAL_SAMPLING_FACTOR_REFERENCE,
+  VIDEOATTRIBUTEKEY_COLORFORMAT_CHROMA_HORIZONTAL_SUBSAMPLING_FACTOR,
+  VIDEOATTRIBUTEKEY_COLORFORMAT_CHROMA_VERTICAL_SUBSAMPLING_FACTOR,
+  // Base for telling implementation specific information about the optimal
+  // number of picture buffers to be provided to the implementation.
+  VIDEOATTRIBUTEKEY_PICTUREBUFFER_REQUIREMENTS_BASE = 0x10000,
+  // Following two keys are used to signal how many buffers are needed by the
+  // implementation as a function of the maximum number of reference frames set
+  // by the stream. Number of required buffers is
+  //   MAX_REF_FRAMES * REFERENCE_PIC_MULTIPLIER + ADDITIONAL_BUFFERS
+  VIDEOATTRIBUTEKEY_PICTUREBUFFER_REQUIREMENTS_ADDITIONAL_BUFFERS,
+  VIDEOATTRIBUTEKEY_PICTUREBUFFER_REQUIREMENTS_REFERENCE_PIC_MULTIPLIER,
+  // If decoder does not support pixel accurate strides for picture buffer, this
+  // parameter tells the stride multiple that is needed by the decoder. Plugin
+  // must obey the given stride in its picture buffer allocations.
+  VIDEOATTRIBUTEKEY_PICTUREBUFFER_REQUIREMENTS_STRIDE_MULTIPLE,
+};
+
+// FourCC codes identifying the bitstream codec (byte order per the comments).
+enum VideoCodecFourcc {
+  VIDEOCODECFOURCC_NONE = 0,
+  VIDEOCODECFOURCC_VP8 = 0x00385056,   // a.k.a. Fourcc 'VP8\0'.
+  VIDEOCODECFOURCC_H264 = 0x31637661,  // a.k.a. Fourcc 'avc1'.
+};
+
+// VP8 specific information to be carried over the APIs.
+// Enumeration for flags defining supported VP8 profiles.
+// VP8 specific information to be carried over the APIs.
+// Enumeration for flags defining supported VP8 profiles. Values are distinct
+// bits so multiple profiles can be OR'd into one bitmask.
+enum VP8Profile {
+  VP8PROFILE_NONE = 0,
+  VP8PROFILE_0 = 1,
+  VP8PROFILE_1 = 1 << 1,
+  VP8PROFILE_2 = 1 << 2,
+  VP8PROFILE_3 = 1 << 3,
+};
+
+// H.264 specific information to be carried over the APIs.
+// Enumeration for flags defining supported H.264 profiles.
+// H.264 specific information to be carried over the APIs.
+// Enumeration for flags defining supported H.264 profiles; values are bits
+// intended to be OR'd into a bitmask.
+// NOTE(review): bit (1 << 1) is skipped between BASELINE (1) and MAIN
+// (1 << 2) — confirm whether this gap is intentional before relying on the
+// numeric values.
+enum H264Profile {
+  H264PROFILE_NONE = 0,
+  H264PROFILE_BASELINE = 1,
+  H264PROFILE_MAIN = 1 << 2,
+  H264PROFILE_EXTENDED = 1 << 3,
+  H264PROFILE_HIGH = 1 << 4,
+  H264PROFILE_HIGH10PROFILE = 1 << 5,
+  H264PROFILE_HIGH422PROFILE = 1 << 6,
+  H264PROFILE_HIGH444PREDICTIVEPROFILE = 1 << 7,
+  H264PROFILE_SCALABLEBASELINE = 1 << 8,
+  H264PROFILE_SCALABLEHIGH = 1 << 9,
+  H264PROFILE_STEREOHIGH = 1 << 10,
+  H264PROFILE_MULTIVIEWHIGH = 1 << 11,
+};
+
+// Enumeration for defining H.264 level of decoder implementation.
+// Enumeration for defining H.264 level of decoder implementation.
+// Values are cumulative bitmasks: each level's value includes the bits of
+// every lower level, so support for level X implies (value & H264LEVEL_Y)
+// is non-zero for any lower level Y.
+enum H264Level {
+  H264LEVEL_NONE = 0,
+  H264LEVEL_10 = 1,
+  H264LEVEL_1B = H264LEVEL_10 | 1 << 1,
+  H264LEVEL_11 = H264LEVEL_1B | 1 << 2,
+  H264LEVEL_12 = H264LEVEL_11 | 1 << 3,
+  H264LEVEL_13 = H264LEVEL_12 | 1 << 4,
+  H264LEVEL_20 = H264LEVEL_13 | 1 << 5,
+  H264LEVEL_21 = H264LEVEL_20 | 1 << 6,
+  H264LEVEL_22 = H264LEVEL_21 | 1 << 7,
+  H264LEVEL_30 = H264LEVEL_22 | 1 << 8,
+  H264LEVEL_31 = H264LEVEL_30 | 1 << 9,
+  H264LEVEL_32 = H264LEVEL_31 | 1 << 10,
+  H264LEVEL_40 = H264LEVEL_32 | 1 << 11,
+  H264LEVEL_41 = H264LEVEL_40 | 1 << 12,
+  H264LEVEL_42 = H264LEVEL_41 | 1 << 13,
+  H264LEVEL_50 = H264LEVEL_42 | 1 << 14,
+  H264LEVEL_51 = H264LEVEL_50 | 1 << 15,
+};
+
+// Enumeration to describe which payload format is used within the exchanged
+// bitstream buffers.
+// Enumeration to describe which payload format is used within the exchanged
+// bitstream buffers. Values are distinct bits so several supported formats
+// can be advertised in one bitmask.
+enum H264PayloadFormat {
+  H264PAYLOADFORMAT_NONE = 0,
+  // NALUs separated by Start Code.
+  H264PAYLOADFORMAT_BYTESTREAM = 1,
+  // Exactly one raw NALU per buffer.
+  H264PAYLOADFORMAT_ONE_NALU_PER_BUFFER = 1 << 1,
+  // NALU separated by 1-byte interleaved length field.
+  H264PAYLOADFORMAT_ONE_BYTE_INTERLEAVED_LENGTH = 1 << 2,
+  // NALU separated by 2-byte interleaved length field.
+  H264PAYLOADFORMAT_TWO_BYTE_INTERLEAVED_LENGTH = 1 << 3,
+  // NALU separated by 4-byte interleaved length field.
+  H264PAYLOADFORMAT_FOUR_BYTE_INTERLEAVED_LENGTH = 1 << 4,
+};
+
// Video decoder interface.
+// TODO(vmr): Move many of the inner classes into the media namespace to
+// simplify the code that uses them.
class VideoDecodeAccelerator {
public:
- virtual ~VideoDecodeAccelerator() {}
+ virtual ~VideoDecodeAccelerator();
// Enumeration of potential errors generated by the API.
enum Error {
VIDEODECODERERROR_NONE = 0,
VIDEODECODERERROR_UNINITIALIZED,
VIDEODECODERERROR_UNSUPPORTED,
+ VIDEODECODERERROR_INVALIDINPUT,
+ VIDEODECODERERROR_MEMFAILURE,
VIDEODECODERERROR_INSUFFICIENT_BUFFERS,
VIDEODECODERERROR_INSUFFICIENT_RESOURCES,
VIDEODECODERERROR_HARDWARE,
+ VIDEODECODERERROR_UNEXPECTED_FLUSH,
};
// Interface expected from PictureBuffers where pictures are stored.
class PictureBuffer {
public:
enum MemoryType {
- PICTUREBUFFER_MEMORYTYPE_SYSTEM = 0,
+ PICTUREBUFFER_MEMORYTYPE_NONE = 0,
+ PICTUREBUFFER_MEMORYTYPE_SYSTEM,
PICTUREBUFFER_MEMORYTYPE_GL_TEXTURE,
};
+  // Union to represent one data plane in the picture buffer.
+ union DataPlaneHandle {
+ struct {
+ uint32 context_id; // GLES context id.
+ uint32 texture_id; // GLES texture id.
+ };
+ void* sysmem; // Simply a pointer to system memory.
+ };
virtual ~PictureBuffer();
- virtual uint32 GetId() = 0;
+ virtual int32 GetId() = 0;
virtual gfx::Size GetSize() = 0;
virtual const std::vector<uint32>& GetColorFormat() = 0;
virtual MemoryType GetMemoryType() = 0;
+ virtual std::vector<DataPlaneHandle>& GetPlaneHandles() = 0;
};
class Picture {
@@ -76,6 +302,9 @@ class VideoDecodeAccelerator {
// the picture buffer size it has provided to the decoder. Thus, there is
// no function to query the buffer size from this class.
+ // Returns the picture buffer where this picture is contained.
+ virtual PictureBuffer* picture_buffer() = 0;
+
// Returns the decoded size of the decoded picture in pixels.
virtual gfx::Size GetDecodedSize() const = 0;
@@ -181,7 +410,7 @@ class VideoDecodeAccelerator {
// |callback| contains the callback function pointer.
//
// Returns true when command successfully accepted. Otherwise false.
- virtual bool Abort(VideoDecodeAcceleratorCallback* callback) = 0;
+ virtual bool Abort(VideoDecodeAcceleratorCallback* callback) = 0;
};
} // namespace media
diff --git a/ppapi/c/dev/pp_video_dev.h b/ppapi/c/dev/pp_video_dev.h
index f568105..50ad226 100644
--- a/ppapi/c/dev/pp_video_dev.h
+++ b/ppapi/c/dev/pp_video_dev.h
@@ -31,8 +31,6 @@ enum PP_VideoAttributeDictionary {
// PP_VIDEOATTR_DICTIONARY_TERMINATOR
// };
// Keys for defining video bitstream format.
- // Terminating entry for bitstream format descriptions.
- PP_VIDEOATTR_BITSTREAMFORMATKEY_NONE,
// Value is type of PP_VideoCodecFourcc. Commonly known attributes values are
// defined in PP_VideoCodecFourcc enumeration.
PP_VIDEOATTR_BITSTREAMFORMATKEY_FOURCC,
@@ -287,11 +285,10 @@ union PP_PictureData_Dev {
PP_Resource sysmem;
// Structure to define explicitly a GLES2 context.
struct {
- // Context allocated using. Use PPB_Context3D_Dev interface to handle this
- // resource.
+ // Context allocated using PPB_Context3D_Dev.
PP_Resource context;
// Texture ID in the given context where picture is stored.
- GLuint textureId;
+ GLuint texture_id;
} gles2_texture;
// Client-specified id for the picture buffer. By using this value client can
// keep track of the buffers it has assigned to the video decoder and how they
@@ -312,7 +309,7 @@ struct PP_Picture_Dev {
// information carried over metadata includes timestamps. If there is
// multiple NAL units each with their own respective metadata, only the
// metadata from the latest call to Decode will be carried over.
- void* metadata;
+ void* user_handle;
};
// Enumeration for error events that may be reported through
@@ -325,6 +322,10 @@ enum PP_VideoDecodeError_Dev {
PP_VIDEODECODEERROR_UNINITIALIZED,
// Decoder does not support feature of configuration or bitstream.
PP_VIDEODECODEERROR_UNSUPPORTED,
+ // Decoder did not get valid input.
+ PP_VIDEODECODERERROR_INVALIDINPUT,
+ // Failure in memory allocation or mapping.
+ PP_VIDEODECODERERROR_MEMFAILURE,
// Decoder was given bitstream that would result in output pictures but it
// has not been provided buffers to do all this.
PP_VIDEODECODEERROR_INSUFFICIENT_BUFFERS,
diff --git a/ppapi/ppapi_tests.gypi b/ppapi/ppapi_tests.gypi
index c2fc7eb..4c4271c 100644
--- a/ppapi/ppapi_tests.gypi
+++ b/ppapi/ppapi_tests.gypi
@@ -232,6 +232,8 @@
'tests/test_url_util.h',
'tests/test_utils.cc',
'tests/test_utils.h',
+ 'tests/test_video_decoder.cc',
+ 'tests/test_video_decoder.h',
# Deprecated test cases.
'tests/test_instance_deprecated.cc',
diff --git a/ppapi/tests/test_video_decoder.cc b/ppapi/tests/test_video_decoder.cc
new file mode 100644
index 0000000..f669d47
--- /dev/null
+++ b/ppapi/tests/test_video_decoder.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ppapi/tests/test_video_decoder.h"
+
+#include "ppapi/c/dev/ppb_video_decoder_dev.h"
+#include "ppapi/c/dev/ppb_testing_dev.h"
+#include "ppapi/c/ppb_var.h"
+#include "ppapi/tests/testing_instance.h"
+
+REGISTER_TEST_CASE(VideoDecoder);
+
+bool TestVideoDecoder::Init() {
+ video_decoder_interface_ = reinterpret_cast<PPB_VideoDecoder_Dev const*>(
+ pp::Module::Get()->GetBrowserInterface(PPB_VIDEODECODER_DEV_INTERFACE));
+ var_interface_ = reinterpret_cast<PPB_Var const*>(
+ pp::Module::Get()->GetBrowserInterface(PPB_VAR_INTERFACE));
+ testing_interface_ = reinterpret_cast<PPB_Testing_Dev const*>(
+ pp::Module::Get()->GetBrowserInterface(PPB_TESTING_DEV_INTERFACE));
+ if (!testing_interface_) {
+ // Give a more helpful error message for the testing interface being gone
+ // since that needs special enabling in Chrome.
+ instance_->AppendError("This test needs the testing interface, which is "
+ "not currently available. In Chrome, use --enable-pepper-testing when "
+ "launching.");
+ }
+ return video_decoder_interface_ && var_interface_ && testing_interface_;
+}
+
+void TestVideoDecoder::RunTest() {
+ instance_->LogTest("Create", TestCreate());
+}
+
+void TestVideoDecoder::QuitMessageLoop() {
+ testing_interface_->QuitMessageLoop(instance_->pp_instance());
+}
+
+std::string TestVideoDecoder::TestCreate() {
+ PP_Resource decoder = video_decoder_interface_->Create(
+ instance_->pp_instance(), NULL);
+ if (decoder == 0) {
+ return "Error creating the decoder";
+ }
+ PASS();
+}
diff --git a/ppapi/tests/test_video_decoder.h b/ppapi/tests/test_video_decoder.h
new file mode 100644
index 0000000..7c852b7
--- /dev/null
+++ b/ppapi/tests/test_video_decoder.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef PPAPI_TESTS_TEST_VIDEO_DECODER_H_
+#define PPAPI_TESTS_TEST_VIDEO_DECODER_H_
+
+#include "ppapi/c/pp_stdint.h"
+#include "ppapi/tests/test_case.h"
+
+struct PPB_Testing_Dev;
+struct PPB_Var;
+struct PPB_VideoDecoder_Dev;
+
+class TestVideoDecoder : public TestCase {
+ public:
+ TestVideoDecoder(TestingInstance* instance) : TestCase(instance) {}
+
+ // TestCase implementation.
+ virtual bool Init();
+ virtual void RunTest();
+
+ void QuitMessageLoop();
+
+ private:
+ std::string TestCreate();
+
+ // Used by the tests that access the C API directly.
+ const PPB_VideoDecoder_Dev* video_decoder_interface_;
+ const PPB_Var* var_interface_;
+ const PPB_Testing_Dev* testing_interface_;
+};
+
+#endif // PPAPI_TESTS_TEST_VIDEO_DECODER_H_
diff --git a/webkit/plugins/ppapi/ppb_video_decoder_impl.cc b/webkit/plugins/ppapi/ppb_video_decoder_impl.cc
index 60bed3d..ec2d3f1 100644
--- a/webkit/plugins/ppapi/ppb_video_decoder_impl.cc
+++ b/webkit/plugins/ppapi/ppb_video_decoder_impl.cc
@@ -91,20 +91,20 @@ void ReusePictureBuffer(PP_Resource video_decoder,
decoder->ReusePictureBuffer(picture_buffer);
}
-PP_Bool Flush(PP_Resource decoder_id,
+PP_Bool Flush(PP_Resource video_decoder,
PP_CompletionCallback callback) {
scoped_refptr<PPB_VideoDecoder_Impl> decoder(
- Resource::GetAs<PPB_VideoDecoder_Impl>(decoder_id));
+ Resource::GetAs<PPB_VideoDecoder_Impl>(video_decoder));
if (!decoder)
return PP_FALSE;
return BoolToPPBool(decoder->Flush(callback));
}
-PP_Bool Abort(PP_Resource decoder_id,
+PP_Bool Abort(PP_Resource video_decoder,
PP_CompletionCallback callback) {
scoped_refptr<PPB_VideoDecoder_Impl> decoder(
- Resource::GetAs<PPB_VideoDecoder_Impl>(decoder_id));
+ Resource::GetAs<PPB_VideoDecoder_Impl>(video_decoder));
if (!decoder)
return PP_FALSE;