authorjiesun@google.com <jiesun@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2010-08-09 15:59:44 +0000
committerjiesun@google.com <jiesun@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2010-08-09 15:59:44 +0000
commitc9f28a7d16b71b17185b456e3b53e4e638d005d0 (patch)
tree5940cfb4244935b01608e286f84a83413e0329a1 /chrome/gpu
parent6b1b2b6bc21270ef1b90a28293a1cb743e933383 (diff)
Special thanks to in-ming cheng for the MFT hardware decoding code.

1. ipc_video_decoder.cc/h is a media pipeline filter that uses the GPU decoder facilities in the video stack. It is only enabled when (a) hardware composition is on, (b) the hardware decoding command-line flag is on, and (c) the H.264 codec is specified.
2. gpu_video_service.cc/h is a singleton in the GPU process that provides video services to renderer processes; through it we can create decoders. (In the future we could create encoders or capturers too.)
3. gpu_video_decoder.cc/h: abstract interface for hardware decoders.
4. gpu_video_decoder_mft.cc/h: Media Foundation Transform hardware decoder, which runs on Windows 7 only.
5. gpu_video_service_host.cc/h is a singleton in the renderer process that acts as a proxy for gpu_video_service.
6. gpu_video_decoder_host.cc/h is a proxy for gpu_video_decoder (1-to-1 mapping). Basically, there is one global GpuVideoService in the GPU process and one GpuVideoServiceHost per renderer process. Each renderer process can have multiple render views, and each view can have multiple GpuVideoDecoderHosts that connect to GpuVideoDecoders through GpuChannelHost/GpuChannel.
7. gpu_video_common.cc/h: IPC message definitions and pickle/marshaling support.

ISSUES:
1. In the media pipeline, let the decoder determine whether a bitstream filter should be used, instead of deciding via the command line.
2. Stop readback from the D3D surface; use ANGLE.
3. Flush logic still needs fine tuning.
4. Create a thread in GpuVideoDecoder, post messages in the message handler, and let derived classes handle the message loop?
5. Error handling.
6. Input ring buffer implementation. The current implementation is naive.
7. Add an output queue for the MFT decoder.
8. Query capabilities at GetVideoService()...

BUG=None
TEST=Windows7

Review URL: http://codereview.chromium.org/2873089

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55405 0039d316-1c4b-4281-b951-d872f2087c98
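For orientation, a sketch of the process topology described above (the renderer-side classes are outside this change's chrome/gpu diffstat, so their placement here is inferred from the commit message):

    Renderer process                             GPU process
    ----------------                             -----------
    GpuVideoServiceHost (singleton)  <-- IPC --> GpuVideoService (singleton)
    GpuVideoDecoderHost (many)       <-- IPC --> GpuVideoDecoder (1:1 per host)
              \                                       /
               +-- GpuChannelHost <----> GpuChannel --+
                     (route ids multiplex decoders over one channel)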
Diffstat (limited to 'chrome/gpu')
-rw-r--r-- chrome/gpu/gpu_channel.cc | 52
-rw-r--r-- chrome/gpu/gpu_channel.h | 5
-rw-r--r-- chrome/gpu/gpu_video_decoder.cc | 108
-rw-r--r-- chrome/gpu/gpu_video_decoder.h | 76
-rw-r--r-- chrome/gpu/gpu_video_decoder_mft.cc | 594
-rw-r--r-- chrome/gpu/gpu_video_decoder_mft.h | 98
-rw-r--r-- chrome/gpu/gpu_video_service.cc | 76
-rw-r--r-- chrome/gpu/gpu_video_service.h | 56
8 files changed, 1065 insertions, 0 deletions
diff --git a/chrome/gpu/gpu_channel.cc b/chrome/gpu/gpu_channel.cc
index 69c66d4..a63faa9 100644
--- a/chrome/gpu/gpu_channel.cc
+++ b/chrome/gpu/gpu_channel.cc
@@ -17,6 +17,7 @@
#include "chrome/common/chrome_switches.h"
#include "chrome/common/gpu_messages.h"
#include "chrome/gpu/gpu_thread.h"
+#include "chrome/gpu/gpu_video_service.h"
#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
@@ -88,6 +89,12 @@ void GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
OnCreateOffscreenCommandBuffer)
IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
OnDestroyCommandBuffer)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_GetVideoService,
+ OnGetVideoService)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoDecoder,
+ OnCreateVideoDecoder)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoDecoder,
+ OnDestroyVideoDecoder)
IPC_MESSAGE_UNHANDLED_ERROR()
IPC_END_MESSAGE_MAP()
}
@@ -178,6 +185,50 @@ void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
#endif
}
+void GpuChannel::OnGetVideoService(GpuVideoServiceInfoParam* info) {
+ info->service_available_ = 0;
+#if defined(ENABLE_GPU)
+#if defined(OS_WIN)
+ // TODO(jiesun): Not every Windows platform will support our Media
+ // Foundation implementation. Add more checks here.
+ LOG(INFO) << "GpuChannel::OnGetVideoService";
+ GpuVideoService* service = GpuVideoService::get();
+ if (service == NULL)
+ return;
+
+ info->video_service_host_route_id_ = GenerateRouteID();
+ info->video_service_route_id_ = GenerateRouteID();
+ // TODO(jiesun): we could have multiple entries in this routing table.
+ router_.AddRoute(info->video_service_route_id_, service);
+ info->service_available_ = 1;
+#endif
+#endif
+}
+
+void GpuChannel::OnCreateVideoDecoder(GpuVideoDecoderInfoParam* info) {
+#if defined(ENABLE_GPU)
+ LOG(INFO) << "GpuChannel::OnCreateVideoDecoder";
+ info->decoder_id_ = -1;
+ GpuVideoService* service = GpuVideoService::get();
+ if (service == NULL)
+ return;
+
+ info->decoder_host_route_id_ = GenerateRouteID();
+ info->decoder_route_id_ = GenerateRouteID();
+ service->CreateVideoDecoder(this, &router_, info);
+#endif
+}
+
+void GpuChannel::OnDestroyVideoDecoder(int32 decoder_id) {
+#if defined(ENABLE_GPU)
+ LOG(ERROR) << "GpuChannel::OnDestroyVideoDecoder";
+ GpuVideoService* service = GpuVideoService::get();
+ if (service == NULL)
+ return;
+ service->DestroyVideoDecoder(&router_, decoder_id);
+#endif
+}
+
bool GpuChannel::Init() {
// Check whether we're already initialized.
if (channel_.get())
@@ -198,6 +249,7 @@ bool GpuChannel::Init() {
channel_name, IPC::Channel::MODE_SERVER, this, NULL,
ChildProcess::current()->io_message_loop(), false,
ChildProcess::current()->GetShutDownEvent()));
+
return true;
}
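The handlers above allocate fresh route ids with GenerateRouteID() and register listeners on router_, so a single GpuChannel can multiplex messages for the video service and any number of decoders. Below is a minimal standalone sketch of that dispatch pattern, using toy types; this is an illustration of the idea, not Chromium's actual MessageRouter.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    // Stand-in for IPC::Channel::Listener.
    struct Listener {
      virtual ~Listener() {}
      virtual void OnMessageReceived(const std::string& msg) = 0;
    };

    // Stand-in for MessageRouter: maps a route id to a listener.
    class Router {
     public:
      int32_t GenerateRouteID() { return next_id_++; }
      void AddRoute(int32_t id, Listener* listener) { routes_[id] = listener; }
      void RemoveRoute(int32_t id) { routes_.erase(id); }
      bool Dispatch(int32_t id, const std::string& msg) {
        auto it = routes_.find(id);
        if (it == routes_.end()) return false;  // No such route.
        it->second->OnMessageReceived(msg);
        return true;
      }
     private:
      int32_t next_id_ = 1;
      std::map<int32_t, Listener*> routes_;
    };

    struct ToyDecoder : Listener {
      void OnMessageReceived(const std::string& msg) override {
        std::cout << "decoder got: " << msg << "\n";
      }
    };

    int main() {
      Router router;
      ToyDecoder decoder;
      int32_t route_id = router.GenerateRouteID();
      router.AddRoute(route_id, &decoder);  // Like OnCreateVideoDecoder().
      router.Dispatch(route_id, "EmptyThisBuffer");
      router.RemoveRoute(route_id);         // Like OnDestroyVideoDecoder().
      return 0;
    }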
diff --git a/chrome/gpu/gpu_channel.h b/chrome/gpu/gpu_channel.h
index da6601f..f21007f 100644
--- a/chrome/gpu/gpu_channel.h
+++ b/chrome/gpu/gpu_channel.h
@@ -12,6 +12,7 @@
#include "base/scoped_open_process.h"
#include "base/scoped_ptr.h"
#include "build/build_config.h"
+#include "chrome/common/gpu_video_common.h"
#include "chrome/common/message_router.h"
#include "chrome/gpu/gpu_command_buffer_stub.h"
#include "gfx/native_widget_types.h"
@@ -72,6 +73,10 @@ class GpuChannel : public IPC::Channel::Listener,
int32* route_id);
void OnDestroyCommandBuffer(int32 route_id);
+ void OnGetVideoService(GpuVideoServiceInfoParam* info);
+ void OnCreateVideoDecoder(GpuVideoDecoderInfoParam* info);
+ void OnDestroyVideoDecoder(int32 decoder_id);
+
scoped_ptr<IPC::SyncChannel> channel_;
// Handle to the renderer process who is on the other side of the channel.
diff --git a/chrome/gpu/gpu_video_decoder.cc b/chrome/gpu/gpu_video_decoder.cc
new file mode 100644
index 0000000..fa478bf
--- /dev/null
+++ b/chrome/gpu/gpu_video_decoder.cc
@@ -0,0 +1,108 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/common/gpu_messages.h"
+#include "chrome/gpu/gpu_channel.h"
+#include "chrome/gpu/gpu_video_decoder.h"
+
+void GpuVideoDecoder::OnChannelConnected(int32 peer_pid) {
+}
+
+void GpuVideoDecoder::OnChannelError() {
+}
+
+void GpuVideoDecoder::OnMessageReceived(const IPC::Message& msg) {
+ IPC_BEGIN_MESSAGE_MAP(GpuVideoDecoder, msg)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_Initialize,
+ OnInitialize)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_Destroy,
+ OnUninitialize)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_Flush,
+ OnFlush)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_EmptyThisBuffer,
+ OnEmptyThisBuffer)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_FillThisBuffer,
+ OnFillThisBuffer)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_FillThisBufferDoneACK,
+ OnFillThisBufferDoneACK)
+ IPC_MESSAGE_UNHANDLED_ERROR()
+ IPC_END_MESSAGE_MAP()
+}
+
+GpuVideoDecoder::GpuVideoDecoder(
+ const GpuVideoDecoderInfoParam* param,
+ GpuChannel* channel,
+ base::ProcessHandle handle)
+ : decoder_host_route_id_(param->decoder_host_route_id_),
+ channel_(channel), renderer_handle_(handle) {
+}
+
+void GpuVideoDecoder::OnInitialize(const GpuVideoDecoderInitParam& param) {
+ init_param_ = param;
+ done_param_.success_ = DoInitialize(init_param_, &done_param_);
+}
+
+void GpuVideoDecoder::OnUninitialize() {
+ DoUninitialize();
+}
+
+void GpuVideoDecoder::OnFlush() {
+ DoFlush();
+}
+
+void GpuVideoDecoder::OnEmptyThisBuffer(
+ const GpuVideoDecoderInputBufferParam& buffer) {
+ DoEmptyThisBuffer(buffer);
+}
+void GpuVideoDecoder::OnFillThisBuffer(
+ const GpuVideoDecoderOutputBufferParam& frame) {
+ DoFillThisBuffer(frame);
+}
+
+void GpuVideoDecoder::OnFillThisBufferDoneACK() {
+ DoFillThisBufferDoneACK();
+}
+
+void GpuVideoDecoder::SendInitializeDone(
+ const GpuVideoDecoderInitDoneParam& param) {
+ if (!channel_->Send(
+ new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_InitializeACK failed";
+ }
+}
+
+void GpuVideoDecoder::SendUninitializeDone() {
+ if (!channel_->Send(new GpuVideoDecoderHostMsg_DestroyACK(route_id()))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_DestroyACK failed";
+ }
+}
+
+void GpuVideoDecoder::SendFlushDone() {
+ if (!channel_->Send(new GpuVideoDecoderHostMsg_FlushACK(route_id()))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_FlushACK failed";
+ }
+}
+
+void GpuVideoDecoder::SendEmptyBufferDone() {
+ if (!channel_->Send(
+ new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferDone failed";
+ }
+}
+
+void GpuVideoDecoder::SendEmptyBufferACK() {
+ if (!channel_->Send(
+ new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferACK failed";
+ }
+}
+
+void GpuVideoDecoder::SendFillBufferDone(
+ const GpuVideoDecoderOutputBufferParam& frame) {
+ if (!channel_->Send(
+ new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), frame))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBufferDone failed";
+ }
+}
+
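Reading these handlers and Send* helpers together with the MFT implementation later in this CL, the per-buffer handshake between a renderer-side GpuVideoDecoderHost and this GpuVideoDecoder appears to be:

    host -> decoder : EmptyThisBuffer        compressed input is in the transfer buffer
    decoder -> host : EmptyThisBufferACK     input transfer buffer may be reused
    decoder -> host : EmptyThisBufferDone    an input sample was consumed; send more
    host -> decoder : FillThisBuffer         request one decoded frame
    decoder -> host : FillThisBufferDone     output transfer buffer holds a frame
    host -> decoder : FillThisBufferDoneACK  output transfer buffer may be reused

(The host side, gpu_video_service_host.cc/h and gpu_video_decoder_host.cc/h, is not part of the chrome/gpu diffstat shown here.)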
diff --git a/chrome/gpu/gpu_video_decoder.h b/chrome/gpu/gpu_video_decoder.h
new file mode 100644
index 0000000..62170fe
--- /dev/null
+++ b/chrome/gpu/gpu_video_decoder.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_GPU_GPU_VIDEO_DECODER_H_
+#define CHROME_GPU_GPU_VIDEO_DECODER_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/ref_counted.h"
+#include "base/scoped_ptr.h"
+#include "chrome/common/gpu_video_common.h"
+#include "ipc/ipc_channel.h"
+
+class GpuChannel;
+
+class GpuVideoDecoder
+ : public IPC::Channel::Listener,
+ public base::RefCountedThreadSafe<GpuVideoDecoder> {
+
+ public:
+ // IPC::Channel::Listener.
+ virtual void OnChannelConnected(int32 peer_pid);
+ virtual void OnChannelError();
+ virtual void OnMessageReceived(const IPC::Message& message);
+
+ virtual bool DoInitialize(const GpuVideoDecoderInitParam& init_param,
+ GpuVideoDecoderInitDoneParam* done_param) = 0;
+ virtual bool DoUninitialize() = 0;
+ virtual void DoFlush() = 0;
+ virtual void DoEmptyThisBuffer(
+ const GpuVideoDecoderInputBufferParam& buffer) = 0;
+ virtual void DoFillThisBuffer(
+ const GpuVideoDecoderOutputBufferParam& frame) = 0;
+ virtual void DoFillThisBufferDoneACK() = 0;
+
+ GpuVideoDecoder(const GpuVideoDecoderInfoParam* param,
+ GpuChannel* channel_,
+ base::ProcessHandle handle);
+ virtual ~GpuVideoDecoder() {}
+
+ protected:
+ // Output message helper.
+ void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param);
+ void SendUninitializeDone();
+ void SendFlushDone();
+ void SendEmptyBufferDone();
+ void SendEmptyBufferACK();
+ void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& frame);
+
+ int32 route_id() { return decoder_host_route_id_; }
+
+ int32 decoder_host_route_id_;
+ GpuChannel* channel_;
+ base::ProcessHandle renderer_handle_;
+
+ GpuVideoDecoderInitParam init_param_;
+ GpuVideoDecoderInitDoneParam done_param_;
+
+ scoped_ptr<base::SharedMemory> input_transfer_buffer_;
+ scoped_ptr<base::SharedMemory> output_transfer_buffer_;
+
+ private:
+ // Input message handler.
+ void OnInitialize(const GpuVideoDecoderInitParam& param);
+ void OnUninitialize();
+ void OnFlush();
+ void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer);
+ void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& frame);
+ void OnFillThisBufferDoneACK();
+
+ DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoder);
+};
+
+#endif // CHROME_GPU_GPU_VIDEO_DECODER_H_
+
diff --git a/chrome/gpu/gpu_video_decoder_mft.cc b/chrome/gpu/gpu_video_decoder_mft.cc
new file mode 100644
index 0000000..8c16201
--- /dev/null
+++ b/chrome/gpu/gpu_video_decoder_mft.cc
@@ -0,0 +1,594 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/gpu/gpu_video_decoder_mft.h"
+
+#if defined(OS_WIN)
+
+#pragma comment(lib, "dxva2.lib")
+#pragma comment(lib, "d3d9.lib")
+#pragma comment(lib, "evr.lib")
+#pragma comment(lib, "mf.lib")
+#pragma comment(lib, "mfplat.lib")
+#pragma comment(lib, "mfuuid.lib")
+#pragma comment(lib, "strmiids.lib")
+
+GpuVideoDecoderMFT::GpuVideoDecoderMFT(
+ const GpuVideoDecoderInfoParam* param,
+ GpuChannel* channel_,
+ base::ProcessHandle handle)
+ : GpuVideoDecoder(param, channel_, handle),
+ state_(kNormal) {
+ output_transfer_buffer_busy_ = false;
+ pending_request_ = 0;
+}
+
+bool GpuVideoDecoderMFT::StartupComLibraries() {
+ HRESULT hr;
+ hr = CoInitializeEx(NULL,
+ COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "CoInit fail";
+ return false;
+ }
+
+ hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "MFStartup fail";
+ CoUninitialize();
+ return false;
+ }
+ return true;
+}
+
+void GpuVideoDecoderMFT::ShutdownComLibraries() {
+ HRESULT hr;
+ hr = MFShutdown();
+ if (FAILED(hr)) {
+ LOG(WARNING) << "Warning: MF failed to shutdown";
+ }
+ CoUninitialize();
+}
+
+// Creates a Media Foundation sample with one buffer containing a copy of the
+// given Annex B stream data.
+// If duration and sample_time are not known, provide 0.
+// min_size specifies the minimum size of the buffer (might be required by
+// the decoder for input). The times here should be given in 100ns units.
+IMFSample* GpuVideoDecoderMFT::CreateInputSample(uint8* data,
+ int32 size,
+ int64 timestamp,
+ int64 duration,
+ int32 min_size) {
+ ScopedComPtr<IMFSample> sample;
+ HRESULT hr = MFCreateSample(sample.Receive());
+ if (FAILED(hr) || !sample.get()) {
+ LOG(ERROR) << "Unable to create an empty sample";
+ return NULL;
+ }
+
+ ScopedComPtr<IMFMediaBuffer> buffer;
+ int32 buffer_length = min_size > size ? min_size : size;
+ hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Unable to create an empty buffer";
+ return NULL;
+ }
+
+ hr = sample->AddBuffer(buffer.get());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to add empty buffer to sample";
+ return NULL;
+ }
+
+ if (duration > 0 && FAILED(sample->SetSampleDuration(duration))) {
+ LOG(ERROR) << "Failed to set sample duration";
+ return NULL;
+ }
+
+ if (timestamp > 0 && FAILED(sample->SetSampleTime(timestamp))) {
+ LOG(ERROR) << "Failed to set sample time";
+ return NULL;
+ }
+
+ DWORD max_length, current_length;
+ uint8* buffer_data;
+ hr = buffer->Lock(&buffer_data, &max_length, &current_length);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to lock buffer";
+ return NULL;
+ }
+ CHECK_GE(static_cast<int>(max_length), size);
+ memcpy(buffer_data, data, size);
+ CHECK(SUCCEEDED(buffer->Unlock()));
+
+ hr = buffer->SetCurrentLength(size);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set current length to " << size;
+ return NULL;
+ }
+
+ return sample.Detach();
+}
+
+bool GpuVideoDecoderMFT::CreateD3DDevManager(HWND video_window) {
+ d3d9_.Attach(Direct3DCreate9(D3D_SDK_VERSION));
+ if (d3d9_.get() == NULL) {
+ LOG(ERROR) << "Failed to create D3D9";
+ return false;
+ }
+
+ D3DPRESENT_PARAMETERS present_params = {0};
+ present_params.BackBufferWidth = init_param_.width_;
+ present_params.BackBufferHeight = init_param_.height_;
+ present_params.BackBufferFormat = D3DFMT_UNKNOWN;
+ present_params.BackBufferCount = 1;
+ present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
+ present_params.hDeviceWindow = video_window;
+ present_params.Windowed = TRUE;
+ present_params.Flags = D3DPRESENTFLAG_VIDEO;
+ present_params.FullScreen_RefreshRateInHz = 0;
+ present_params.PresentationInterval = 0;
+
+ // D3DCREATE_HARDWARE_VERTEXPROCESSING specifies hardware vertex processing.
+ // (Is it even needed for just video decoding?)
+ HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,
+ D3DDEVTYPE_HAL,
+ video_window,
+ D3DCREATE_HARDWARE_VERTEXPROCESSING,
+ &present_params,
+ device_.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to create D3D Device";
+ return false;
+ }
+
+ UINT dev_manager_reset_token = 0;
+ hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
+ device_manager_.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Couldn't create D3D Device manager";
+ return false;
+ }
+
+ hr = device_manager_->ResetDevice(device_.get(),
+ dev_manager_reset_token);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set device to device manager";
+ return false;
+ }
+
+ return true;
+}
+
+bool GpuVideoDecoderMFT::InitMediaFoundation() {
+ if (!StartupComLibraries())
+ return false;
+
+ if (!CreateD3DDevManager(GetDesktopWindow()))
+ return false;
+
+ if (!InitDecoder())
+ return false;
+
+ if (!GetStreamsInfoAndBufferReqs())
+ return false;
+
+ return SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM);
+}
+
+bool GpuVideoDecoderMFT::InitDecoder() {
+ // TODO(jiesun): use MFEnum to get decoder CLSID.
+ HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
+ NULL,
+ CLSCTX_INPROC_SERVER,
+ __uuidof(IMFTransform),
+ reinterpret_cast<void**>(decoder_.Receive()));
+ if (FAILED(hr) || !decoder_.get()) {
+ LOG(ERROR) << "CoCreateInstance failed " << std::hex << std::showbase << hr;
+ return false;
+ }
+
+ if (!CheckDecoderDxvaSupport())
+ return false;
+
+ hr = decoder_->ProcessMessage(
+ MFT_MESSAGE_SET_D3D_MANAGER,
+ reinterpret_cast<ULONG_PTR>(device_manager_.get()));
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set D3D9 device to decoder";
+ return false;
+ }
+
+ return SetDecoderMediaTypes();
+}
+
+bool GpuVideoDecoderMFT::CheckDecoderDxvaSupport() {
+ ScopedComPtr<IMFAttributes> attributes;
+ HRESULT hr = decoder_->GetAttributes(attributes.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Unlock: Failed to get attributes, hr = "
+ << std::hex << std::showbase << hr;
+ return false;
+ }
+
+ UINT32 dxva;
+ hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
+ if (FAILED(hr) || !dxva) {
+ LOG(ERROR) << "Failed to get DXVA attr, hr = "
+ << std::hex << std::showbase << hr
+ << "this might not be the right decoder.";
+ return false;
+ }
+ return true;
+}
+
+bool GpuVideoDecoderMFT::SetDecoderMediaTypes() {
+ return SetDecoderInputMediaType() &&
+ SetDecoderOutputMediaType(MFVideoFormat_NV12);
+}
+
+bool GpuVideoDecoderMFT::SetDecoderInputMediaType() {
+ ScopedComPtr<IMFMediaType> media_type;
+ HRESULT hr = MFCreateMediaType(media_type.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to create empty media type object";
+ return false;
+ }
+
+ hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "SetGUID for major type failed";
+ return false;
+ }
+
+ hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "SetGUID for subtype failed";
+ return false;
+ }
+
+ hr = decoder_->SetInputType(0, media_type.get(), 0); // No flags
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set decoder's input type";
+ return false;
+ }
+
+ return true;
+}
+
+bool GpuVideoDecoderMFT::SetDecoderOutputMediaType(const GUID subtype) {
+ DWORD i = 0;
+ IMFMediaType* out_media_type;
+ bool found = false;
+ while (SUCCEEDED(decoder_->GetOutputAvailableType(0, i, &out_media_type))) {
+ GUID out_subtype;
+ HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to GetGUID() on GetOutputAvailableType() " << i;
+ out_media_type->Release();
+ ++i; continue;  // Advance to the next available type.
+ }
+ if (out_subtype == subtype) {
+ hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to SetOutputType to |subtype| or obtain "
+ << "width/height/stride " << std::hex << hr;
+ } else {
+ out_media_type->Release();
+ return true;
+ }
+ }
+ i++;
+ out_media_type->Release();
+ }
+ return false;
+}
+
+bool GpuVideoDecoderMFT::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
+ HRESULT hr = decoder_->ProcessMessage(msg, NULL);
+ return SUCCEEDED(hr);
+}
+
+// Prints out info about the input/output streams and gets the minimum buffer
+// sizes for input and output samples.
+// The MFT will not allocate buffers for either input or output, so we have
+// to do it ourselves and make sure they're the correct size.
+// The exception is when DXVA is enabled: the decoder allocates its own output.
+bool GpuVideoDecoderMFT::GetStreamsInfoAndBufferReqs() {
+ HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to get input stream info";
+ return false;
+ }
+ LOG(INFO) << "Input stream info: ";
+ LOG(INFO) << "Max latency: " << input_stream_info_.hnsMaxLatency;
+
+ // There should be three flags: one requiring that a whole frame be in a
+ // single sample, one requiring that there be only one buffer in a single
+ // sample, and one specifying a fixed sample size (cbSize).
+ LOG(INFO) << "Flags: "
+ << std::hex << std::showbase << input_stream_info_.dwFlags;
+ CHECK_EQ(static_cast<int>(input_stream_info_.dwFlags), 0x7);
+ LOG(INFO) << "Min buffer size: " << input_stream_info_.cbSize;
+ LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
+ LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment;
+ if (input_stream_info_.cbAlignment > 0) {
+ LOG(WARNING) << "Warning: Decoder requires input to be aligned";
+ }
+
+ hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to get output stream info";
+ return false;
+ }
+ LOG(INFO) << "Output stream info: ";
+
+ // The flags here should be the same and mean the same thing, except that
+ // when DXVA is enabled there is an extra 0x100 flag, meaning the decoder
+ // will allocate its own samples.
+ CHECK_EQ(static_cast<int>(output_stream_info_.dwFlags), 0x107);
+ LOG(INFO) << "Min buffer size: " << output_stream_info_.cbSize;
+ LOG(INFO) << "Alignment: " << output_stream_info_.cbAlignment;
+ if (output_stream_info_.cbAlignment > 0) {
+ LOG(WARNING) << "Warning: Decoder requires output to be aligned";
+ }
+
+ return true;
+}
+
+bool GpuVideoDecoderMFT::DoInitialize(
+ const GpuVideoDecoderInitParam& param,
+ GpuVideoDecoderInitDoneParam* done_param) {
+ LOG(ERROR) << "GpuVideoDecoderMFT::DoInitialize";
+
+ done_param->format_ =
+ GpuVideoDecoderInitDoneParam::SurfaceFormat_YV12;
+ done_param->surface_type_ =
+ GpuVideoDecoderInitDoneParam::SurfaceTypeSystemMemory;
+ done_param->input_buffer_handle_ = base::SharedMemory::NULLHandle();
+ done_param->output_buffer_handle_ = base::SharedMemory::NULLHandle();
+
+ do {
+ done_param->success_ = false;
+
+ if (!InitMediaFoundation())
+ break;
+
+ // TODO(jiesun): Check the assumption of input size < original size.
+ done_param->input_buffer_size_ = param.width_ * param.height_ * 3 / 2;
+ input_transfer_buffer_.reset(new base::SharedMemory);
+ if (!input_transfer_buffer_->Create(std::wstring(), false, false,
+ done_param->input_buffer_size_))
+ break;
+ if (!input_transfer_buffer_->Map(done_param->input_buffer_size_))
+ break;
+
+ // TODO(jiesun): Allocate this according to the surface format.
+ // The format could actually change during streaming; we need to notify
+ // the GpuVideoDecoderHost side when this happens and renegotiate the
+ // transfer buffer.
+ done_param->output_buffer_size_ = param.width_ * param.height_ * 3 / 2;
+ output_transfer_buffer_.reset(new base::SharedMemory);
+ if (!output_transfer_buffer_->Create(std::wstring(), false, false,
+ done_param->output_buffer_size_))
+ break;
+ if (!output_transfer_buffer_->Map(done_param->output_buffer_size_))
+ break;
+
+ if (!input_transfer_buffer_->ShareToProcess(
+ renderer_handle_,
+ &done_param->input_buffer_handle_))
+ break;
+ if (!output_transfer_buffer_->ShareToProcess(
+ renderer_handle_,
+ &done_param->output_buffer_handle_))
+ break;
+
+ done_param->success_ = true;
+ } while (0);
+
+ SendInitializeDone(*done_param);
+ return true;
+}
+
+bool GpuVideoDecoderMFT::DoUninitialize() {
+ LOG(ERROR) << "GpuVideoDecoderMFT::DoUninitialize";
+ SendUninitializeDone();
+ return true;
+}
+
+void GpuVideoDecoderMFT::DoEmptyThisBuffer(
+ const GpuVideoDecoderInputBufferParam& buffer) {
+ LOG(ERROR) << "GpuVideoDecoderMFT::EmptyThisBuffer";
+
+ CHECK(input_transfer_buffer_->memory());
+ ScopedComPtr<IMFSample> sample;
+ if (buffer.size_) {
+ uint8* data = static_cast<uint8*>(input_transfer_buffer_->memory());
+ sample.Attach(CreateInputSample(data,
+ buffer.size_,
+ buffer.timestamp_*10,
+ 0LL,
+ input_stream_info_.cbSize));
+ CHECK(sample.get());
+ } else {
+ state_ = kEosFlush;
+ }
+
+ input_buffer_queue_.push_back(sample);
+ SendEmptyBufferACK();
+
+ while (pending_request_)
+ if (!DoDecode()) break;
+}
+
+void GpuVideoDecoderMFT::DoFillThisBuffer(
+ const GpuVideoDecoderOutputBufferParam& frame) {
+ LOG(ERROR) << "GpuVideoDecoderMFT::FillThisBuffer";
+
+ pending_request_++;
+ while (pending_request_)
+ if (!DoDecode()) break;
+}
+
+void GpuVideoDecoderMFT::DoFillThisBufferDoneACK() {
+ output_transfer_buffer_busy_ = false;
+ pending_request_--;
+ while (pending_request_)
+ if (!DoDecode()) break;
+}
+
+void GpuVideoDecoderMFT::DoFlush() {
+ state_ = kFlushing;
+
+ while (!input_buffer_queue_.empty())
+ input_buffer_queue_.pop_front();
+ pending_request_ = 0;
+ // TODO(jiesun): this is wrong??
+ output_transfer_buffer_busy_ = false;
+ SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH);
+
+ state_ = kNormal;
+ SendFlushDone();
+}
+
+bool GpuVideoDecoderMFT::DoDecode() {
+ if (state_ != kNormal && state_ != kEosFlush) return false;
+ if (output_transfer_buffer_busy_) return false;
+
+ MFT_OUTPUT_DATA_BUFFER output_data_buffer;
+ memset(&output_data_buffer, 0, sizeof(output_data_buffer));
+ output_data_buffer.dwStreamID = 0;
+
+ ScopedComPtr<IMFSample> output_sample;
+ DWORD status;
+ HRESULT hr = decoder_->ProcessOutput(0, // No flags
+ 1, // # of out streams to pull from
+ &output_data_buffer,
+ &status);
+
+ IMFCollection* events = output_data_buffer.pEvents;
+ if (events != NULL) {
+ LOG(INFO) << "Got events from ProcessOuput, but discarding";
+ events->Release();
+ }
+
+ if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+ bool ok = SetDecoderOutputMediaType(MFVideoFormat_NV12);
+ CHECK(ok);  // SetDecoderOutputMediaType() returns bool, not HRESULT.
+ return true;
+ }
+ if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+ if (input_buffer_queue_.empty()) {
+ if (state_ == kEosFlush) {
+ GpuVideoDecoderOutputBufferParam output_param;
+ output_param.timestamp_ = 0;
+ output_param.duration_ = 0;
+ output_param.flags_ =
+ GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream;
+ output_transfer_buffer_busy_ = true;
+ SendFillBufferDone(output_param);
+ }
+ return false;
+ }
+ while (!input_buffer_queue_.empty()) {
+ ScopedComPtr<IMFSample> input_sample = input_buffer_queue_.front();
+ input_buffer_queue_.pop_front();
+
+ if (input_sample.get()) {
+ HRESULT hr = decoder_->ProcessInput(0, input_sample.get(), 0);
+ if (hr == MF_E_NOTACCEPTING) return true;
+ CHECK(SUCCEEDED(hr));
+ } else {
+ SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM);
+ }
+
+ // If we already received the input EOS, we do not need to issue
+ // more requests for new samples.
+ if (state_ != kEosFlush)
+ SendEmptyBufferDone();
+ }
+ return true;
+ }
+
+ CHECK(SUCCEEDED(hr));
+ output_sample.Attach(output_data_buffer.pSample);
+ CHECK(output_sample.get());
+
+ int64 timestamp, duration;
+ output_sample->GetSampleTime(&timestamp);
+ output_sample->GetSampleDuration(&duration);
+
+ // The duration and timestamps are in 100-ns units, so divide by 10
+ // to convert to microseconds.
+ timestamp /= 10;
+ duration /= 10;
+
+ // Sanity check that there is really something in the sample.
+ DWORD buf_count;
+ hr = output_sample->GetBufferCount(&buf_count);
+ CHECK(SUCCEEDED(hr) && buf_count == 1);
+
+ ScopedComPtr<IMFMediaBuffer> output_buffer;
+ hr = output_sample->GetBufferByIndex(0, output_buffer.Receive());
+ CHECK(SUCCEEDED(hr));
+
+ ScopedComPtr<IDirect3DSurface9> surface;
+ hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
+ IID_PPV_ARGS(surface.Receive()));
+ CHECK(SUCCEEDED(hr));
+
+
+ // NV12 to YV12
+ D3DLOCKED_RECT d3dlocked_rect;
+ RECT rect = {0, 0, init_param_.width_, init_param_.height_};
+ hr = surface->LockRect(&d3dlocked_rect, &rect, 0);
+
+ if (SUCCEEDED(hr)) {
+ D3DSURFACE_DESC desc;
+ hr = surface->GetDesc(&desc);
+ CHECK(SUCCEEDED(hr));
+
+ uint32 src_stride = d3dlocked_rect.Pitch;
+ uint32 dst_stride = init_param_.width_;
+ uint8* src_y = static_cast<uint8*>(d3dlocked_rect.pBits);
+ uint8* src_uv = src_y + src_stride * desc.Height;
+ uint8* dst_y = static_cast<uint8*>(output_transfer_buffer_->memory());
+ uint8* dst_u = dst_y + dst_stride * init_param_.height_;
+ uint8* dst_v = dst_u + dst_stride * init_param_.height_ / 4;
+
+ for (int y = 0; y < init_param_.height_; ++y) {
+ for (int x = 0; x < init_param_.width_; ++x) {
+ dst_y[x] = src_y[x];
+ if (!(y & 1)) {
+ if (x & 1)
+ dst_v[x>>1] = src_uv[x];
+ else
+ dst_u[x>>1] = src_uv[x];
+ }
+ }
+ dst_y += dst_stride;
+ src_y += src_stride;
+ if (!(y & 1)) {
+ src_uv += src_stride;
+ dst_v += dst_stride >> 1;
+ dst_u += dst_stride >> 1;
+ }
+ }
+ hr = surface->UnlockRect();
+ CHECK(SUCCEEDED(hr));
+ }
+
+ GpuVideoDecoderOutputBufferParam output_param;
+ output_param.timestamp_ = timestamp;
+ output_param.duration_ = duration;
+ output_param.flags_ = 0;
+ output_transfer_buffer_busy_ = true;
+ SendFillBufferDone(output_param);
+ return true;
+}
+
+#endif
+
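As a standalone reference, here is a minimal sketch of the NV12-to-planar repack that DoDecode() performs above, assuming an even-sized frame and the same plane order the code writes (Y, then U, then V; strictly, YV12 stores V before U, so swap the chroma pointers for true YV12):

    #include <cstdint>
    #include <cstring>

    // src_y:  width x height luma plane, src_stride bytes per row.
    // src_uv: interleaved UVUV... plane, width x (height / 2), same stride.
    // dst:    width * height * 3 / 2 bytes, tightly packed (stride == width).
    void RepackNv12ToPlanar(const uint8_t* src_y, const uint8_t* src_uv,
                            int src_stride, int width, int height,
                            uint8_t* dst) {
      uint8_t* dst_y = dst;
      uint8_t* dst_u = dst_y + width * height;
      uint8_t* dst_v = dst_u + width * height / 4;
      // Copy the luma plane row by row to drop any stride padding.
      for (int y = 0; y < height; ++y)
        std::memcpy(dst_y + y * width, src_y + y * src_stride, width);
      // De-interleave the chroma plane: even bytes are U, odd bytes are V.
      for (int y = 0; y < height / 2; ++y) {
        const uint8_t* row = src_uv + y * src_stride;
        for (int x = 0; x < width / 2; ++x) {
          dst_u[y * (width / 2) + x] = row[2 * x];
          dst_v[y * (width / 2) + x] = row[2 * x + 1];
        }
      }
    }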
diff --git a/chrome/gpu/gpu_video_decoder_mft.h b/chrome/gpu/gpu_video_decoder_mft.h
new file mode 100644
index 0000000..3644617
--- /dev/null
+++ b/chrome/gpu/gpu_video_decoder_mft.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_GPU_GPU_VIDEO_DECODER_MFT_H_
+#define CHROME_GPU_GPU_VIDEO_DECODER_MFT_H_
+
+#include "build/build_config.h" // For OS_WIN.
+
+#if defined(OS_WIN)
+
+#include <d3d9.h>
+#include <dxva2api.h>
+#include <evr.h>
+#include <initguid.h>
+#include <mfapi.h>
+#include <mferror.h>
+#include <mfidl.h>
+#include <shlwapi.h>
+#include <wmcodecdsp.h>
+
+#include <deque>
+
+#include "base/scoped_comptr_win.h"
+#include "chrome/gpu/gpu_video_decoder.h"
+
+class GpuVideoDecoderMFT : public GpuVideoDecoder {
+ public:
+ virtual bool DoInitialize(const GpuVideoDecoderInitParam& init_param,
+ GpuVideoDecoderInitDoneParam* done_param);
+ virtual bool DoUninitialize();
+ virtual void DoFlush();
+ virtual void DoEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer);
+ virtual void DoFillThisBuffer(const GpuVideoDecoderOutputBufferParam& frame);
+ virtual void DoFillThisBufferDoneACK();
+
+ private:
+ GpuVideoDecoderMFT(const GpuVideoDecoderInfoParam* param,
+ GpuChannel* channel_,
+ base::ProcessHandle handle);
+
+ friend class GpuVideoService;
+
+ // TODO(jiesun): Find a way to move all these to GpuVideoService.
+ static bool StartupComLibraries();
+ static void ShutdownComLibraries();
+ bool CreateD3DDevManager(HWND video_window);
+
+ // helper.
+ bool InitMediaFoundation();
+ bool InitDecoder();
+ bool CheckDecoderDxvaSupport();
+
+ bool SetDecoderMediaTypes();
+ bool SetDecoderInputMediaType();
+ bool SetDecoderOutputMediaType(const GUID subtype);
+ bool SendMFTMessage(MFT_MESSAGE_TYPE msg);
+ bool GetStreamsInfoAndBufferReqs();
+
+ // Helper function to create an IMFSample* out of the input buffer.
+ // Data is copied into the IMFSample's own IMFMediaBuffer.
+ // The client should Release() the IMFSample*.
+ static IMFSample* CreateInputSample(uint8* data,
+ int32 size,
+ int64 timestamp,
+ int64 duration,
+ int32 min_size);
+
+ bool DoDecode();
+
+ ScopedComPtr<IDirect3D9> d3d9_;
+ ScopedComPtr<IDirect3DDevice9> device_;
+ ScopedComPtr<IDirect3DDeviceManager9> device_manager_;
+ ScopedComPtr<IMFTransform> decoder_;
+
+ MFT_INPUT_STREAM_INFO input_stream_info_;
+ MFT_OUTPUT_STREAM_INFO output_stream_info_;
+
+ std::deque<ScopedComPtr<IMFSample> > input_buffer_queue_;
+ bool output_transfer_buffer_busy_;
+
+ typedef enum {
+ kNormal,    // Normal playing state.
+ kFlushing,  // After Flush() is received, before FlushDone().
+ kEosFlush,  // After input EOS is received.
+ kStopped,   // After output EOS is received.
+ } State;
+ State state_;
+
+ int32 pending_request_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoderMFT);
+};
+
+#endif
+
+#endif // CHROME_GPU_GPU_VIDEO_DECODER_MFT_H_
+
diff --git a/chrome/gpu/gpu_video_service.cc b/chrome/gpu/gpu_video_service.cc
new file mode 100644
index 0000000..45b7063
--- /dev/null
+++ b/chrome/gpu/gpu_video_service.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/common/gpu_messages.h"
+#include "chrome/gpu/gpu_channel.h"
+#include "chrome/gpu/gpu_video_decoder_mft.h"
+#include "chrome/gpu/gpu_video_service.h"
+
+GpuVideoService::GpuVideoService() : next_available_decoder_id_(0) {
+ // TODO(jiesun): move this time-consuming stuff out of here.
+ InitializeGpuVideoService();
+}
+GpuVideoService::~GpuVideoService() {
+ // TODO(jiesun): move this time-consuming stuff out of here.
+ UninitializeGpuVideoService();
+}
+
+void GpuVideoService::OnChannelConnected(int32 peer_pid) {
+ LOG(ERROR) << "GpuVideoService::OnChannelConnected";
+}
+
+void GpuVideoService::OnChannelError() {
+ LOG(ERROR) << "GpuVideoService::OnChannelError";
+}
+
+void GpuVideoService::OnMessageReceived(const IPC::Message& msg) {
+#if 0
+ IPC_BEGIN_MESSAGE_MAP(GpuVideoService, msg)
+ IPC_MESSAGE_UNHANDLED_ERROR()
+ IPC_END_MESSAGE_MAP()
+#endif
+}
+
+bool GpuVideoService::InitializeGpuVideoService() {
+ return true;
+}
+
+bool GpuVideoService::UninitializeGpuVideoService() {
+ return true;
+}
+
+bool GpuVideoService::CreateVideoDecoder(
+ GpuChannel* channel,
+ MessageRouter* router,
+ GpuVideoDecoderInfoParam* param) {
+ // TODO(jiesun): find a better way to determine which GpuVideoDecoder
+ // to return on the current platform.
+#if defined(OS_WIN)
+ GpuVideoDecoderInfo decoder_info;
+ int32 decoder_id = GetNextAvailableDecoderID();
+ param->decoder_id_ = decoder_id;
+ base::ProcessHandle handle = channel->renderer_handle();
+ decoder_info.decoder_ = new GpuVideoDecoderMFT(param, channel, handle);
+ decoder_info.channel_ = channel;
+ decoder_info.param = *param;
+ decoder_map_[decoder_id] = decoder_info;
+ router->AddRoute(param->decoder_route_id_, decoder_info.decoder_);
+ return true;
+#else
+ return false;
+#endif
+}
+
+void GpuVideoService::DestroyVideoDecoder(
+ MessageRouter* router,
+ int32 decoder_id) {
+ int32 route_id = decoder_map_[decoder_id].param.decoder_route_id_;
+ router->RemoveRoute(route_id);
+ decoder_map_.erase(decoder_id);
+}
+
+int32 GpuVideoService::GetNextAvailableDecoderID() {
+ return ++next_available_decoder_id_;
+}
+
diff --git a/chrome/gpu/gpu_video_service.h b/chrome/gpu/gpu_video_service.h
new file mode 100644
index 0000000..3172031
--- /dev/null
+++ b/chrome/gpu/gpu_video_service.h
@@ -0,0 +1,56 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_GPU_GPU_VIDEO_SERVICE_H_
+#define CHROME_GPU_GPU_VIDEO_SERVICE_H_
+
+#include <map>
+
+#include "base/scoped_ptr.h"
+#include "base/ref_counted.h"
+#include "base/singleton.h"
+#include "chrome/gpu/gpu_video_decoder.h"
+#include "ipc/ipc_channel.h"
+
+class GpuChannel;
+
+class GpuVideoService : public IPC::Channel::Listener,
+ public Singleton<GpuVideoService> {
+ public:
+ // IPC::Channel::Listener.
+ virtual void OnChannelConnected(int32 peer_pid);
+ virtual void OnChannelError();
+ virtual void OnMessageReceived(const IPC::Message& message);
+
+ bool CreateVideoDecoder(GpuChannel* channel,
+ MessageRouter* router,
+ GpuVideoDecoderInfoParam* param);
+ void DestroyVideoDecoder(MessageRouter* router,
+ int32 decoder_id);
+
+ private:
+ struct GpuVideoDecoderInfo {
+ scoped_refptr<GpuVideoDecoder> decoder_;
+ GpuChannel* channel_;
+ GpuVideoDecoderInfoParam param;
+ };
+
+ GpuVideoService();
+ virtual ~GpuVideoService();
+
+ std::map<int32, GpuVideoDecoderInfo> decoder_map_;
+ int32 next_available_decoder_id_;
+
+ // Platform-specific video service implementations will override these.
+ virtual bool InitializeGpuVideoService();
+ virtual bool UninitializeGpuVideoService();
+
+ int32 GetNextAvailableDecoderID();
+
+ friend struct DefaultSingletonTraits<GpuVideoService>;
+ DISALLOW_COPY_AND_ASSIGN(GpuVideoService);
+};
+
+#endif // CHROME_GPU_GPU_VIDEO_SERVICE_H_
+