summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjiesun@google.com <jiesun@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2010-08-09 15:59:44 +0000
committerjiesun@google.com <jiesun@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2010-08-09 15:59:44 +0000
commitc9f28a7d16b71b17185b456e3b53e4e638d005d0 (patch)
tree5940cfb4244935b01608e286f84a83413e0329a1
parent6b1b2b6bc21270ef1b90a28293a1cb743e933383 (diff)
downloadchromium_src-c9f28a7d16b71b17185b456e3b53e4e638d005d0.zip
chromium_src-c9f28a7d16b71b17185b456e3b53e4e638d005d0.tar.gz
chromium_src-c9f28a7d16b71b17185b456e3b53e4e638d005d0.tar.bz2
Special thanks to in-ming cheng's MFT hardware decoding code.
1. ipc_video_decoder.cc/h is a media pipeline filter which uses the gpu decoder facilities in the video stack. It is only enabled when (a) hardware composition is on, (b) the hardware decoding command line flag is on, and (c) the h264 codec is specified. 2. gpu_video_service.cc/h is a singleton in the gpu process which provides video services for the renderer process; through it we could create a decoder. (In my imagination, in the future, we could create an encoder or capturer too.) 3. gpu_video_decoder.cc/h: abstract interface for the hardware decoder. 4. gpu_video_decoder_mft.cc/h: Media Foundation Transform hardware decoder which runs on Windows 7 only. 5. gpu_video_service_host.cc/h is a singleton in the renderer process which provides a proxy for gpu_video_service. 6. gpu_video_decoder_host.cc/h is a proxy for gpu_video_decoder (1-to-1 map). Basically there is one global GpuVideoService in the GPU process and one GpuVideoServiceHost in the renderer process. For each renderer process, there could be multiple renderer views, each of which could have multiple GpuVideoDecoderHosts that connect to GpuVideoDecoder through GpuChannelHost/GpuChannel. 7. gpu_video_common.cc/h: IPC message definition and pickle/marshaling support. ISSUES: 1. In the media pipeline, we need to let the decoder determine if a bit stream filter should be used instead of letting the command line determine it. 2. Stop readback from the D3D surface; use ANGLE. 3. Flush logic still needs fine tuning. 4. CreateThread in GpuVideoDecoder, post messages in the message handler, and let derived classes handle the message loop? 5. Error handling. 6. Input ring buffer implementation. The current impl is naive. 7. Add an output queue for the MFT decoder. 8. Query capabilities at GetVideoServices()... BUG=None TEST=Windows7 Review URL: http://codereview.chromium.org/2873089 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55405 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--chrome/browser/renderer_host/browser_render_process_host.cc2
-rw-r--r--chrome/chrome.gyp6
-rw-r--r--chrome/chrome_common.gypi2
-rw-r--r--chrome/chrome_renderer.gypi7
-rw-r--r--chrome/common/gpu_messages_internal.h80
-rw-r--r--chrome/common/gpu_video_common.cc211
-rw-r--r--chrome/common/gpu_video_common.h172
-rw-r--r--chrome/gpu/gpu_channel.cc52
-rw-r--r--chrome/gpu/gpu_channel.h5
-rw-r--r--chrome/gpu/gpu_video_decoder.cc108
-rw-r--r--chrome/gpu/gpu_video_decoder.h76
-rw-r--r--chrome/gpu/gpu_video_decoder_mft.cc594
-rw-r--r--chrome/gpu/gpu_video_decoder_mft.h98
-rw-r--r--chrome/gpu/gpu_video_service.cc76
-rw-r--r--chrome/gpu/gpu_video_service.h56
-rw-r--r--chrome/renderer/DEPS1
-rw-r--r--chrome/renderer/gpu_channel_host.cc4
-rw-r--r--chrome/renderer/gpu_channel_host.h1
-rw-r--r--chrome/renderer/gpu_video_decoder_host.cc199
-rw-r--r--chrome/renderer/gpu_video_decoder_host.h126
-rw-r--r--chrome/renderer/gpu_video_service_host.cc91
-rw-r--r--chrome/renderer/gpu_video_service_host.h51
-rw-r--r--chrome/renderer/media/ipc_video_decoder.cc344
-rw-r--r--chrome/renderer/media/ipc_video_decoder.h100
-rw-r--r--chrome/renderer/render_thread.cc3
-rw-r--r--chrome/renderer/render_view.cc9
-rw-r--r--ipc/ipc_message_utils.h2
-rw-r--r--media/base/media_switches.cc3
-rw-r--r--media/base/media_switches.h1
-rw-r--r--media/filters/ffmpeg_demuxer.cc4
30 files changed, 2481 insertions, 3 deletions
diff --git a/chrome/browser/renderer_host/browser_render_process_host.cc b/chrome/browser/renderer_host/browser_render_process_host.cc
index bee6232..c66facd 100644
--- a/chrome/browser/renderer_host/browser_render_process_host.cc
+++ b/chrome/browser/renderer_host/browser_render_process_host.cc
@@ -578,6 +578,8 @@ void BrowserRenderProcessHost::PropagateBrowserCommandLineToRenderer(
switches::kEnableRemoting,
switches::kEnableClickToPlay,
switches::kPrelaunchGpuProcess,
+ switches::kEnableAcceleratedCompositing,
+ switches::kEnableAcceleratedDecoding,
};
renderer_cmd->CopySwitchesFrom(browser_cmd, kSwitchNames,
arraysize(kSwitchNames));
diff --git a/chrome/chrome.gyp b/chrome/chrome.gyp
index 2d7228d..4245b66 100644
--- a/chrome/chrome.gyp
+++ b/chrome/chrome.gyp
@@ -708,6 +708,12 @@
'gpu/gpu_process.h',
'gpu/gpu_thread.cc',
'gpu/gpu_thread.h',
+ 'gpu/gpu_video_decoder.cc',
+ 'gpu/gpu_video_decoder.h',
+ 'gpu/gpu_video_decoder_mft.cc',
+ 'gpu/gpu_video_decoder_mft.h',
+ 'gpu/gpu_video_service.cc',
+ 'gpu/gpu_video_service.h',
'gpu/gpu_view_win.cc',
'gpu/gpu_view_win.h',
],
diff --git a/chrome/chrome_common.gypi b/chrome/chrome_common.gypi
index b019c63..c60359f 100644
--- a/chrome/chrome_common.gypi
+++ b/chrome/chrome_common.gypi
@@ -61,6 +61,8 @@
'common/gpu_messages.cc',
'common/gpu_messages.h',
'common/gpu_messages_internal.h',
+ 'common/gpu_video_common.cc',
+ 'common/gpu_video_common.h',
'common/indexed_db_key.cc',
'common/indexed_db_key.h',
'common/logging_chrome.cc',
diff --git a/chrome/chrome_renderer.gypi b/chrome/chrome_renderer.gypi
index a775909..c616afd 100644
--- a/chrome/chrome_renderer.gypi
+++ b/chrome/chrome_renderer.gypi
@@ -18,6 +18,7 @@
'../skia/skia.gyp:skia',
'../third_party/hunspell/hunspell.gyp:hunspell',
'../third_party/cld/cld.gyp:cld',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/npapi/npapi.gyp:npapi',
@@ -64,6 +65,8 @@
'renderer/loadtimes_extension_bindings.cc',
'renderer/media/audio_renderer_impl.cc',
'renderer/media/audio_renderer_impl.h',
+ 'renderer/media/ipc_video_decoder.cc',
+ 'renderer/media/ipc_video_decoder.h',
'renderer/media/ipc_video_renderer.cc',
'renderer/media/ipc_video_renderer.h',
'renderer/net/predictor_queue.cc',
@@ -102,6 +105,10 @@
'renderer/geolocation_dispatcher.cc',
'renderer/geolocation_dispatcher.h',
'renderer/gpu_channel_host.cc',
+ 'renderer/gpu_video_decoder_host.cc',
+ 'renderer/gpu_video_decoder_host.h',
+ 'renderer/gpu_video_service_host.cc',
+ 'renderer/gpu_video_service_host.h',
'renderer/indexed_db_dispatcher.cc',
'renderer/indexed_db_dispatcher.h',
'renderer/gpu_channel_host.h',
diff --git a/chrome/common/gpu_messages_internal.h b/chrome/common/gpu_messages_internal.h
index b17135b..543f087 100644
--- a/chrome/common/gpu_messages_internal.h
+++ b/chrome/common/gpu_messages_internal.h
@@ -10,6 +10,7 @@
// from it via utility_messages.h.
#include "base/shared_memory.h"
#include "chrome/common/gpu_info.h"
+#include "chrome/common/gpu_video_common.h"
#include "gfx/size.h"
#include "ipc/ipc_channel_handle.h"
#include "ipc/ipc_message_macros.h"
@@ -155,6 +156,21 @@ IPC_BEGIN_MESSAGES(GpuChannel)
IPC_MESSAGE_CONTROL1(GpuChannelMsg_DestroyCommandBuffer,
int32 /* instance_id */)
+ // Get hardware video service routing id.
+ IPC_SYNC_MESSAGE_CONTROL0_1(GpuChannelMsg_GetVideoService,
+ GpuVideoServiceInfoParam)
+
+ // Create hardware video decoder && associate it with the output |decoder_id|;
+ // We need this to be control message because we had to map the GpuChannel and
+ // |decoder_id|.
+ IPC_SYNC_MESSAGE_CONTROL0_1(GpuChannelMsg_CreateVideoDecoder,
+ GpuVideoDecoderInfoParam)
+
+ // Release all resources of the hardware video decoder which was associated
+ // with the input |decoder_id|.
+ IPC_SYNC_MESSAGE_CONTROL1_0(GpuChannelMsg_DestroyVideoDecoder,
+ int32 /* decoder_id */)
+
IPC_END_MESSAGES(GpuChannel)
//------------------------------------------------------------------------------
@@ -241,3 +257,67 @@ IPC_BEGIN_MESSAGES(GpuCommandBuffer)
#endif
IPC_END_MESSAGES(GpuCommandBuffer)
+
+//------------------------------------------------------------------------------
+
+// GpuVideoDecoderMsgs : send from renderer process to gpu process.
+IPC_BEGIN_MESSAGES(GpuVideoDecoder)
+ // Initialize and configure GpuVideoDecoder asynchronously.
+ IPC_MESSAGE_ROUTED1(GpuVideoDecoderMsg_Initialize,
+ GpuVideoDecoderInitParam)
+
+ // Destroy and release GpuVideoDecoder asynchronously.
+ IPC_MESSAGE_ROUTED0(GpuVideoDecoderMsg_Destroy)
+
+ // Start decoder flushing operation.
+ IPC_MESSAGE_ROUTED0(GpuVideoDecoderMsg_Flush)
+
+ // Send input buffer to GpuVideoDecoder.
+ IPC_MESSAGE_ROUTED1(GpuVideoDecoderMsg_EmptyThisBuffer,
+ GpuVideoDecoderInputBufferParam)
+
+ // Require output buffer from GpuVideoDecoder.
+ IPC_MESSAGE_ROUTED1(GpuVideoDecoderMsg_FillThisBuffer,
+ GpuVideoDecoderOutputBufferParam)
+
+ // GpuVideoDecoderHost has consumed the output buffer.
+ // NOTE: this may only be useful for the copy back solution
+ // where output transfer buffer had to be guarded.
+ IPC_MESSAGE_ROUTED0(GpuVideoDecoderMsg_FillThisBufferDoneACK)
+
+IPC_END_MESSAGES(GpuVideoDecoder)
+
+//------------------------------------------------------------------------------
+
+// GpuVideoDecoderMsgs : send from gpu process to renderer process.
+IPC_BEGIN_MESSAGES(GpuVideoDecoderHost)
+ // Confirm GpuVideoDecoder had been initialized or failed to initialize.
+ IPC_MESSAGE_ROUTED1(GpuVideoDecoderHostMsg_InitializeACK,
+ GpuVideoDecoderInitDoneParam)
+
+ // Confirm GpuVideoDecoder has been destroyed properly.
+ IPC_MESSAGE_ROUTED0(GpuVideoDecoderHostMsg_DestroyACK)
+
+ // Confirm decoder had been flushed.
+ IPC_MESSAGE_ROUTED0(GpuVideoDecoderHostMsg_FlushACK)
+
+ // GpuVideoDecoder has consumed input buffer from transfer buffer.
+ IPC_MESSAGE_ROUTED0(GpuVideoDecoderHostMsg_EmptyThisBufferACK)
+
+ // GpuVideoDecoder require new input buffer.
+ IPC_MESSAGE_ROUTED0(GpuVideoDecoderHostMsg_EmptyThisBufferDone)
+
+ // GpuVideoDecoder report output buffer ready.
+ IPC_MESSAGE_ROUTED1(GpuVideoDecoderHostMsg_FillThisBufferDone,
+ GpuVideoDecoderOutputBufferParam)
+
+ // GpuVideoDecoder report output format change.
+ IPC_MESSAGE_ROUTED1(GpuVideoDecoderHostMsg_MediaFormatChange,
+ GpuVideoDecoderFormatChangeParam)
+
+ // GpuVideoDecoder report error.
+ IPC_MESSAGE_ROUTED1(GpuVideoDecoderHostMsg_ErrorNotification,
+ GpuVideoDecoderErrorInfoParam)
+
+IPC_END_MESSAGES(GpuVideoDecoderHost)
+
diff --git a/chrome/common/gpu_video_common.cc b/chrome/common/gpu_video_common.cc
new file mode 100644
index 0000000..06116f9
--- /dev/null
+++ b/chrome/common/gpu_video_common.cc
@@ -0,0 +1,211 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/common/gpu_video_common.h"
+
+namespace IPC {
+
+void ParamTraits<GpuVideoServiceInfoParam>::Write(
+ Message* m, const GpuVideoServiceInfoParam& p) {
+ m->WriteInt(p.video_service_route_id_);
+ m->WriteInt(p.video_service_host_route_id_);
+ m->WriteInt(p.service_available_);
+}
+
+bool ParamTraits<GpuVideoServiceInfoParam>::Read(
+ const Message* m, void** iter, GpuVideoServiceInfoParam* r) {
+ if (!m->ReadInt(iter, &r->video_service_route_id_) ||
+ !m->ReadInt(iter, &r->video_service_host_route_id_) ||
+ !m->ReadInt(iter, &r->service_available_))
+ return false;
+ return true;
+}
+
+void ParamTraits<GpuVideoServiceInfoParam>::Log(
+ const GpuVideoServiceInfoParam& p, std::wstring* l) {
+ l->append(StringPrintf(L"(%d, %d, %d)",
+ p.video_service_route_id_,
+ p.video_service_host_route_id_,
+ p.service_available_));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void ParamTraits<GpuVideoDecoderInfoParam>::Write(
+ Message* m, const GpuVideoDecoderInfoParam& p) {
+ m->WriteInt(p.decoder_id_);
+ m->WriteInt(p.decoder_route_id_);
+ m->WriteInt(p.decoder_host_route_id_);
+}
+
+bool ParamTraits<GpuVideoDecoderInfoParam>::Read(
+ const Message* m, void** iter, GpuVideoDecoderInfoParam* r) {
+ if (!m->ReadInt(iter, &r->decoder_id_) ||
+ !m->ReadInt(iter, &r->decoder_route_id_) ||
+ !m->ReadInt(iter, &r->decoder_host_route_id_))
+ return false;
+ return true;
+}
+
+void ParamTraits<GpuVideoDecoderInfoParam>::Log(
+ const GpuVideoDecoderInfoParam& p, std::wstring* l) {
+ l->append(StringPrintf(L"(%d, %d, %d)",
+ p.decoder_id_,
+ p.decoder_route_id_,
+ p.decoder_host_route_id_));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void ParamTraits<GpuVideoDecoderInitParam>::Write(
+ Message* m, const GpuVideoDecoderInitParam& p) {
+ m->WriteInt(p.codec_id_);
+ m->WriteInt(p.width_);
+ m->WriteInt(p.height_);
+}
+
+bool ParamTraits<GpuVideoDecoderInitParam>::Read(
+ const Message* m, void** iter, GpuVideoDecoderInitParam* r) {
+ if (!m->ReadInt(iter, &r->codec_id_) ||
+ !m->ReadInt(iter, &r->width_) ||
+ !m->ReadInt(iter, &r->height_))
+ return false;
+ return true;
+}
+
+void ParamTraits<GpuVideoDecoderInitParam>::Log(
+ const GpuVideoDecoderInitParam& p, std::wstring* l) {
+ l->append(StringPrintf(L"(%d, %d %d)", p.codec_id_, p.width_, p.height_));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void ParamTraits<GpuVideoDecoderInitDoneParam>::Write(
+ Message* m, const GpuVideoDecoderInitDoneParam& p) {
+ m->WriteInt(p.success_);
+ m->WriteInt(p.stride_);
+ m->WriteInt(p.format_);
+ m->WriteInt(p.surface_type_);
+ m->WriteInt(p.input_buffer_size_);
+ m->WriteInt(p.output_buffer_size_);
+ IPC::ParamTraits<base::SharedMemoryHandle>::Write(
+ m, p.input_buffer_handle_);
+ IPC::ParamTraits<base::SharedMemoryHandle>::Write(
+ m, p.output_buffer_handle_);
+}
+
+bool ParamTraits<GpuVideoDecoderInitDoneParam>::Read(
+ const Message* m, void** iter, GpuVideoDecoderInitDoneParam* r) {
+ if (!m->ReadInt(iter, &r->success_) ||
+ !m->ReadInt(iter, &r->stride_) ||
+ !m->ReadInt(iter, &r->format_) ||
+ !m->ReadInt(iter, &r->surface_type_) ||
+ !m->ReadInt(iter, &r->input_buffer_size_) ||
+ !m->ReadInt(iter, &r->output_buffer_size_) ||
+ !IPC::ParamTraits<base::SharedMemoryHandle>::Read(
+ m, iter, &r->input_buffer_handle_) ||
+ !IPC::ParamTraits<base::SharedMemoryHandle>::Read(
+ m, iter, &r->output_buffer_handle_))
+ return false;
+ return true;
+}
+
+void ParamTraits<GpuVideoDecoderInitDoneParam>::Log(
+ const GpuVideoDecoderInitDoneParam& p, std::wstring* l) {
+ l->append(StringPrintf(L"(%d)", p.stride_));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void ParamTraits<GpuVideoDecoderInputBufferParam>::Write(
+ Message* m, const GpuVideoDecoderInputBufferParam& p) {
+ m->WriteInt64(p.timestamp_);
+ m->WriteInt(p.offset_);
+ m->WriteInt(p.size_);
+}
+
+bool ParamTraits<GpuVideoDecoderInputBufferParam>::Read(
+ const Message* m, void** iter, GpuVideoDecoderInputBufferParam* r) {
+ if (!m->ReadInt64(iter, &r->timestamp_) ||
+ !m->ReadInt(iter, &r->offset_) ||
+ !m->ReadInt(iter, &r->size_))
+ return false;
+ return true;
+}
+
+void ParamTraits<GpuVideoDecoderInputBufferParam>::Log(
+ const GpuVideoDecoderInputBufferParam& p, std::wstring* l) {
+ l->append(StringPrintf(L"(%d %d %d)",
+ static_cast<int>(p.timestamp_),
+ p.offset_, p.size_));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void ParamTraits<GpuVideoDecoderOutputBufferParam>::Write(
+ Message* m, const GpuVideoDecoderOutputBufferParam& p) {
+ m->WriteInt64(p.timestamp_);
+ m->WriteInt64(p.duration_);
+ m->WriteInt(p.flags_);
+}
+
+bool ParamTraits<GpuVideoDecoderOutputBufferParam>::Read(
+ const Message* m, void** iter, GpuVideoDecoderOutputBufferParam* r) {
+ if (!m->ReadInt64(iter, &r->timestamp_) ||
+ !m->ReadInt64(iter, &r->duration_) ||
+ !m->ReadInt(iter, &r->flags_))
+ return false;
+ return true;
+}
+
+void ParamTraits<GpuVideoDecoderOutputBufferParam>::Log(
+ const GpuVideoDecoderOutputBufferParam& p, std::wstring* l) {
+ l->append(StringPrintf(L"(%d %d) %x",
+ static_cast<int>(p.timestamp_),
+ static_cast<int>(p.duration_),
+ p.flags_));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void ParamTraits<GpuVideoDecoderErrorInfoParam>::Write(
+ Message* m, const GpuVideoDecoderErrorInfoParam& p) {
+ m->WriteInt(p.error_id);
+}
+
+bool ParamTraits<GpuVideoDecoderErrorInfoParam>::Read(
+ const Message* m, void** iter, GpuVideoDecoderErrorInfoParam* r) {
+ if (!m->ReadInt(iter, &r->error_id))
+ return false;
+ return true;
+}
+
+void ParamTraits<GpuVideoDecoderErrorInfoParam>::Log(
+ const GpuVideoDecoderErrorInfoParam& p, std::wstring* l) {
+ l->append(StringPrintf(L"(%d)", p.error_id));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void ParamTraits<GpuVideoDecoderFormatChangeParam>::Write(
+ Message* m, const GpuVideoDecoderFormatChangeParam& p) {
+ m->WriteInt(p.input_buffer_size_);
+ m->WriteInt(p.output_buffer_size_);
+}
+
+bool ParamTraits<GpuVideoDecoderFormatChangeParam>::Read(
+ const Message* m, void** iter, GpuVideoDecoderFormatChangeParam* r) {
+ if (!m->ReadInt(iter, &r->input_buffer_size_) ||
+ !m->ReadInt(iter, &r->output_buffer_size_))
+ return false;
+ return true;
+}
+
+void ParamTraits<GpuVideoDecoderFormatChangeParam>::Log(
+ const GpuVideoDecoderFormatChangeParam& p, std::wstring* l) {
+ l->append(StringPrintf(L"(%d %d)", p.input_buffer_size_,
+ p.output_buffer_size_));
+}
+};
+
diff --git a/chrome/common/gpu_video_common.h b/chrome/common/gpu_video_common.h
new file mode 100644
index 0000000..34974a6
--- /dev/null
+++ b/chrome/common/gpu_video_common.h
@@ -0,0 +1,172 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_COMMON_GPU_VIDEO_COMMON_H_
+#define CHROME_COMMON_GPU_VIDEO_COMMON_H_
+
+#include "base/basictypes.h"
+#include "base/shared_memory.h"
+#include "chrome/common/common_param_traits.h"
+
+class GpuVideoServiceInfoParam {
+ public:
+ // route id for GpuVideoService on GPU process side for this channel.
+ int32 video_service_route_id_;
+ // route id for GpuVideoServiceHost on Render process side for this channel.
+ int32 video_service_host_route_id_;
+ // TODO(jiesun): define capabilities of video service.
+ int32 service_available_;
+};
+
+class GpuVideoDecoderInfoParam {
+ public:
+ // global decoder id.
+ int32 decoder_id_;
+ // route id for GpuVideoDecoder on GPU process side for this channel.
+ int32 decoder_route_id_;
+ // route id for GpuVideoServiceHost on Render process side for this channel.
+ int32 decoder_host_route_id_;
+};
+
+class GpuVideoDecoderInitParam {
+ public:
+ int32 codec_id_;
+ int32 width_;
+ int32 height_;
+ int32 profile_;
+ int32 level_;
+ int32 frame_rate_den_;
+ int32 frame_rate_num_;
+ int32 aspect_ratio_den_;
+ int32 aspect_ratio_num_;
+};
+
+class GpuVideoDecoderInitDoneParam {
+ public:
+ enum SurfaceType {
+ SurfaceTypeSystemMemory,
+ SurfaceTypeD3D,
+ SurfaceTypeEGLImage,
+ };
+ enum SurfaceFormat {
+ SurfaceFormat_YV12,
+ SurfaceFormat_NV12,
+ SurfaceFormat_XRGB,
+ };
+ int32 success_; // other parameter is only meaningful when this is true.
+ int32 provides_buffer;
+ int32 format_;
+ int32 surface_type_;
+ int32 stride_;
+ int32 input_buffer_size_;
+ int32 output_buffer_size_;
+ base::SharedMemoryHandle input_buffer_handle_;
+ // we do not need this if hardware composition is ready.
+ base::SharedMemoryHandle output_buffer_handle_;
+};
+
+class GpuVideoDecoderInputBufferParam {
+ public:
+ int64 timestamp_; // In unit of microseconds.
+ int32 offset_;
+ int32 size_;
+ int32 flags_; // miscellaneous flag bit mask
+};
+
+class GpuVideoDecoderOutputBufferParam {
+ public:
+ int64 timestamp_; // In unit of microseconds.
+ int64 duration_; // In unit of microseconds.
+ int32 flags_; // miscellaneous flag bit mask
+
+ enum {
+ kFlagsEndOfStream = 0x00000001,
+ kFlagsDiscontinuous = 0x00000002,
+ };
+};
+
+class GpuVideoDecoderErrorInfoParam {
+ public:
+ int32 error_id; // TODO(jiesun): define enum.
+};
+
+// TODO(jiesun): define this.
+class GpuVideoDecoderFormatChangeParam {
+ public:
+ int32 stride_;
+ int32 input_buffer_size_;
+ int32 output_buffer_size_;
+ base::SharedMemoryHandle input_buffer_handle_;
+ base::SharedMemoryHandle output_buffer_handle_;
+};
+
+namespace IPC {
+
+template <>
+struct ParamTraits<GpuVideoServiceInfoParam> {
+ typedef GpuVideoServiceInfoParam param_type;
+ static void Write(Message* m, const param_type& p);
+ static bool Read(const Message* m, void** iter, param_type* r);
+ static void Log(const param_type& p, std::wstring* l);
+};
+
+template <>
+struct ParamTraits<GpuVideoDecoderInfoParam> {
+ typedef GpuVideoDecoderInfoParam param_type;
+ static void Write(Message* m, const param_type& p);
+ static bool Read(const Message* m, void** iter, param_type* r);
+ static void Log(const param_type& p, std::wstring* l);
+};
+
+template <>
+struct ParamTraits<GpuVideoDecoderInitParam> {
+ typedef GpuVideoDecoderInitParam param_type;
+ static void Write(Message* m, const param_type& p);
+ static bool Read(const Message* m, void** iter, param_type* r);
+ static void Log(const param_type& p, std::wstring* l);
+};
+
+template <>
+struct ParamTraits<GpuVideoDecoderInitDoneParam> {
+ typedef GpuVideoDecoderInitDoneParam param_type;
+ static void Write(Message* m, const param_type& p);
+ static bool Read(const Message* m, void** iter, param_type* r);
+ static void Log(const param_type& p, std::wstring* l);
+};
+
+template <>
+struct ParamTraits<GpuVideoDecoderInputBufferParam> {
+ typedef GpuVideoDecoderInputBufferParam param_type;
+ static void Write(Message* m, const param_type& p);
+ static bool Read(const Message* m, void** iter, param_type* r);
+ static void Log(const param_type& p, std::wstring* l);
+};
+
+template <>
+struct ParamTraits<GpuVideoDecoderOutputBufferParam> {
+ typedef GpuVideoDecoderOutputBufferParam param_type;
+ static void Write(Message* m, const param_type& p);
+ static bool Read(const Message* m, void** iter, param_type* r);
+ static void Log(const param_type& p, std::wstring* l);
+};
+
+template <>
+struct ParamTraits<GpuVideoDecoderErrorInfoParam> {
+ typedef GpuVideoDecoderErrorInfoParam param_type;
+ static void Write(Message* m, const param_type& p);
+ static bool Read(const Message* m, void** iter, param_type* r);
+ static void Log(const param_type& p, std::wstring* l);
+};
+
+template <>
+struct ParamTraits<GpuVideoDecoderFormatChangeParam> {
+ typedef GpuVideoDecoderFormatChangeParam param_type;
+ static void Write(Message* m, const param_type& p);
+ static bool Read(const Message* m, void** iter, param_type* r);
+ static void Log(const param_type& p, std::wstring* l);
+};
+};
+
+#endif // CHROME_COMMON_GPU_VIDEO_COMMON_H_
+
diff --git a/chrome/gpu/gpu_channel.cc b/chrome/gpu/gpu_channel.cc
index 69c66d4..a63faa9 100644
--- a/chrome/gpu/gpu_channel.cc
+++ b/chrome/gpu/gpu_channel.cc
@@ -17,6 +17,7 @@
#include "chrome/common/chrome_switches.h"
#include "chrome/common/gpu_messages.h"
#include "chrome/gpu/gpu_thread.h"
+#include "chrome/gpu/gpu_video_service.h"
#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
@@ -88,6 +89,12 @@ void GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
OnCreateOffscreenCommandBuffer)
IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
OnDestroyCommandBuffer)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_GetVideoService,
+ OnGetVideoService)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoDecoder,
+ OnCreateVideoDecoder)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoDecoder,
+ OnDestroyVideoDecoder)
IPC_MESSAGE_UNHANDLED_ERROR()
IPC_END_MESSAGE_MAP()
}
@@ -178,6 +185,50 @@ void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
#endif
}
+void GpuChannel::OnGetVideoService(GpuVideoServiceInfoParam* info) {
+ info->service_available_ = 0;
+#if defined(ENABLE_GPU)
+#if defined(OS_WIN)
+ // TODO(jiesun): Not every windows platforms will support our media
+ // foundation implementation. Add more check here.
+ LOG(INFO) << "GpuChannel::OnGetVideoService";
+ GpuVideoService* service = GpuVideoService::get();
+ if (service == NULL)
+ return;
+
+ info->video_service_host_route_id_ = GenerateRouteID();
+ info->video_service_route_id_ = GenerateRouteID();
+ // TODO(jiesun): we could have multiple entries in this routing table.
+ router_.AddRoute(info->video_service_route_id_, service);
+ info->service_available_ = 1;
+#endif
+#endif
+}
+
+void GpuChannel::OnCreateVideoDecoder(GpuVideoDecoderInfoParam* info) {
+#if defined(ENABLE_GPU)
+ LOG(INFO) << "GpuChannel::OnCreateVideoDecoder";
+ info->decoder_id_ = -1;
+ GpuVideoService* service = GpuVideoService::get();
+ if (service == NULL)
+ return;
+
+ info->decoder_host_route_id_ = GenerateRouteID();
+ info->decoder_route_id_ = GenerateRouteID();
+ service->CreateVideoDecoder(this, &router_, info);
+#endif
+}
+
+void GpuChannel::OnDestroyVideoDecoder(int32 decoder_id) {
+#if defined(ENABLE_GPU)
+ LOG(ERROR) << "GpuChannel::OnDestroyVideoDecoder";
+ GpuVideoService* service = GpuVideoService::get();
+ if (service == NULL)
+ return;
+ service->DestroyVideoDecoder(&router_, decoder_id);
+#endif
+}
+
bool GpuChannel::Init() {
// Check whether we're already initialized.
if (channel_.get())
@@ -198,6 +249,7 @@ bool GpuChannel::Init() {
channel_name, IPC::Channel::MODE_SERVER, this, NULL,
ChildProcess::current()->io_message_loop(), false,
ChildProcess::current()->GetShutDownEvent()));
+
return true;
}
diff --git a/chrome/gpu/gpu_channel.h b/chrome/gpu/gpu_channel.h
index da6601f..f21007f 100644
--- a/chrome/gpu/gpu_channel.h
+++ b/chrome/gpu/gpu_channel.h
@@ -12,6 +12,7 @@
#include "base/scoped_open_process.h"
#include "base/scoped_ptr.h"
#include "build/build_config.h"
+#include "chrome/common/gpu_video_common.h"
#include "chrome/common/message_router.h"
#include "chrome/gpu/gpu_command_buffer_stub.h"
#include "gfx/native_widget_types.h"
@@ -72,6 +73,10 @@ class GpuChannel : public IPC::Channel::Listener,
int32* route_id);
void OnDestroyCommandBuffer(int32 route_id);
+ void OnGetVideoService(GpuVideoServiceInfoParam* info);
+ void OnCreateVideoDecoder(GpuVideoDecoderInfoParam* info);
+ void OnDestroyVideoDecoder(int32 decoder_id);
+
scoped_ptr<IPC::SyncChannel> channel_;
// Handle to the renderer process who is on the other side of the channel.
diff --git a/chrome/gpu/gpu_video_decoder.cc b/chrome/gpu/gpu_video_decoder.cc
new file mode 100644
index 0000000..fa478bf
--- /dev/null
+++ b/chrome/gpu/gpu_video_decoder.cc
@@ -0,0 +1,108 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/common/gpu_messages.h"
+#include "chrome/gpu/gpu_channel.h"
+#include "chrome/gpu/gpu_video_decoder.h"
+
+void GpuVideoDecoder::OnChannelConnected(int32 peer_pid) {
+}
+
+void GpuVideoDecoder::OnChannelError() {
+}
+
+void GpuVideoDecoder::OnMessageReceived(const IPC::Message& msg) {
+ IPC_BEGIN_MESSAGE_MAP(GpuVideoDecoder, msg)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_Initialize,
+ OnInitialize)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_Destroy,
+ OnUninitialize)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_Flush,
+ OnFlush)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_EmptyThisBuffer,
+ OnEmptyThisBuffer)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_FillThisBuffer,
+ OnFillThisBuffer)
+ IPC_MESSAGE_HANDLER(GpuVideoDecoderMsg_FillThisBufferDoneACK,
+ OnFillThisBufferDoneACK)
+ IPC_MESSAGE_UNHANDLED_ERROR()
+ IPC_END_MESSAGE_MAP()
+}
+
+GpuVideoDecoder::GpuVideoDecoder(
+ const GpuVideoDecoderInfoParam* param,
+ GpuChannel* channel,
+ base::ProcessHandle handle)
+ : decoder_host_route_id_(param->decoder_host_route_id_),
+ channel_(channel), renderer_handle_(handle) {
+}
+
+void GpuVideoDecoder::OnInitialize(const GpuVideoDecoderInitParam& param) {
+ init_param_ = param;
+ done_param_.success_ = DoInitialize(init_param_, &done_param_);
+}
+
+void GpuVideoDecoder::OnUninitialize() {
+ DoUninitialize();
+}
+
+void GpuVideoDecoder::OnFlush() {
+ DoFlush();
+}
+
+void GpuVideoDecoder::OnEmptyThisBuffer(
+ const GpuVideoDecoderInputBufferParam& buffer) {
+ DoEmptyThisBuffer(buffer);
+}
+void GpuVideoDecoder::OnFillThisBuffer(
+ const GpuVideoDecoderOutputBufferParam& frame) {
+ DoFillThisBuffer(frame);
+}
+
+void GpuVideoDecoder::OnFillThisBufferDoneACK() {
+ DoFillThisBufferDoneACK();
+}
+
+void GpuVideoDecoder::SendInitializeDone(
+ const GpuVideoDecoderInitDoneParam& param) {
+ if (!channel_->Send(
+ new GpuVideoDecoderHostMsg_InitializeACK(route_id(), param))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_InitializeACK failed";
+ }
+}
+
+void GpuVideoDecoder::SendUninitializeDone() {
+ if (!channel_->Send(new GpuVideoDecoderHostMsg_DestroyACK(route_id()))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_DestroyACK failed";
+ }
+}
+
+void GpuVideoDecoder::SendFlushDone() {
+ if (!channel_->Send(new GpuVideoDecoderHostMsg_FlushACK(route_id()))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_FlushACK failed";
+ }
+}
+
+void GpuVideoDecoder::SendEmptyBufferDone() {
+ if (!channel_->Send(
+ new GpuVideoDecoderHostMsg_EmptyThisBufferDone(route_id()))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferDone failed";
+ }
+}
+
+void GpuVideoDecoder::SendEmptyBufferACK() {
+ if (!channel_->Send(
+ new GpuVideoDecoderHostMsg_EmptyThisBufferACK(route_id()))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBufferACK failed";
+ }
+}
+
+void GpuVideoDecoder::SendFillBufferDone(
+ const GpuVideoDecoderOutputBufferParam& frame) {
+ if (!channel_->Send(
+ new GpuVideoDecoderHostMsg_FillThisBufferDone(route_id(), frame))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBufferDone failed";
+ }
+}
+
diff --git a/chrome/gpu/gpu_video_decoder.h b/chrome/gpu/gpu_video_decoder.h
new file mode 100644
index 0000000..62170fe
--- /dev/null
+++ b/chrome/gpu/gpu_video_decoder.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_GPU_GPU_VIDEO_DECODER_H_
+#define CHROME_GPU_GPU_VIDEO_DECODER_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/ref_counted.h"
+#include "base/scoped_ptr.h"
+#include "chrome/common/gpu_video_common.h"
+#include "ipc/ipc_channel.h"
+
+class GpuChannel;
+
+class GpuVideoDecoder
+ : public IPC::Channel::Listener,
+ public base::RefCountedThreadSafe<GpuVideoDecoder> {
+
+ public:
+ // IPC::Channel::Listener.
+ virtual void OnChannelConnected(int32 peer_pid);
+ virtual void OnChannelError();
+ virtual void OnMessageReceived(const IPC::Message& message);
+
+ virtual bool DoInitialize(const GpuVideoDecoderInitParam& init_param,
+ GpuVideoDecoderInitDoneParam* done_param) = 0;
+ virtual bool DoUninitialize() = 0;
+ virtual void DoFlush() = 0;
+ virtual void DoEmptyThisBuffer(
+ const GpuVideoDecoderInputBufferParam& buffer) = 0;
+ virtual void DoFillThisBuffer(
+ const GpuVideoDecoderOutputBufferParam& frame) = 0;
+ virtual void DoFillThisBufferDoneACK() = 0;
+
+ GpuVideoDecoder(const GpuVideoDecoderInfoParam* param,
+ GpuChannel* channel_,
+ base::ProcessHandle handle);
+ virtual ~GpuVideoDecoder() {}
+
+ protected:
+ // Output message helper.
+ void SendInitializeDone(const GpuVideoDecoderInitDoneParam& param);
+ void SendUninitializeDone();
+ void SendFlushDone();
+ void SendEmptyBufferDone();
+ void SendEmptyBufferACK();
+ void SendFillBufferDone(const GpuVideoDecoderOutputBufferParam& frame);
+
+ int32 route_id() { return decoder_host_route_id_; }
+
+ int32 decoder_host_route_id_;
+ GpuChannel* channel_;
+ base::ProcessHandle renderer_handle_;
+
+ GpuVideoDecoderInitParam init_param_;
+ GpuVideoDecoderInitDoneParam done_param_;
+
+ scoped_ptr<base::SharedMemory> input_transfer_buffer_;
+ scoped_ptr<base::SharedMemory> output_transfer_buffer_;
+
+ private:
+ // Input message handler.
+ void OnInitialize(const GpuVideoDecoderInitParam& param);
+ void OnUninitialize();
+ void OnFlush();
+ void OnEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer);
+ void OnFillThisBuffer(const GpuVideoDecoderOutputBufferParam& frame);
+ void OnFillThisBufferDoneACK();
+
+ DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoder);
+};
+
+#endif // CHROME_GPU_GPU_VIDEO_DECODER_H_
+
diff --git a/chrome/gpu/gpu_video_decoder_mft.cc b/chrome/gpu/gpu_video_decoder_mft.cc
new file mode 100644
index 0000000..8c16201
--- /dev/null
+++ b/chrome/gpu/gpu_video_decoder_mft.cc
@@ -0,0 +1,594 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/gpu/gpu_video_decoder_mft.h"
+
+#if defined(OS_WIN)
+
+#pragma comment(lib, "dxva2.lib")
+#pragma comment(lib, "d3d9.lib")
+#pragma comment(lib, "evr.lib")
+#pragma comment(lib, "mf.lib")
+#pragma comment(lib, "mfplat.lib")
+#pragma comment(lib, "mfuuid.lib")
+#pragma comment(lib, "strmiids.lib")
+
// Constructs the Media-Foundation-backed decoder. All heavy lifting (COM/MF
// startup, D3D device creation, transfer-buffer allocation) is deferred to
// DoInitialize(); this only forwards construction parameters to the base
// class and resets bookkeeping state.
// NOTE(review): the parameter named |channel_| uses the member-naming
// convention; consider renaming it to |channel|.
GpuVideoDecoderMFT::GpuVideoDecoderMFT(
    const GpuVideoDecoderInfoParam* param,
    GpuChannel* channel_,
    base::ProcessHandle handle)
    : GpuVideoDecoder(param, channel_, handle),
      state_(kNormal) {
  output_transfer_buffer_busy_ = false;
  pending_request_ = 0;
}
+
// Initializes COM (apartment-threaded) and Media Foundation for the calling
// thread. Returns false on failure; COM is uninitialized again if MFStartup
// fails so the two libraries are never left half-initialized.
bool GpuVideoDecoderMFT::StartupComLibraries() {
  HRESULT hr;
  hr = CoInitializeEx(NULL,
                      COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
  if (FAILED(hr)) {
    LOG(ERROR) << "CoInit fail";
    return false;
  }

  hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
  if (FAILED(hr)) {
    LOG(ERROR) << "MFStartup fail";
    // Roll back the successful CoInitializeEx so COM state stays balanced.
    CoUninitialize();
    return false;
  }
  return true;
}
+
// Tears down Media Foundation and COM in the reverse order of
// StartupComLibraries(). A failed MFShutdown is logged but COM is
// uninitialized regardless.
void GpuVideoDecoderMFT::ShutdownComLibraries() {
  HRESULT hr;
  hr = MFShutdown();
  if (FAILED(hr)) {
    LOG(WARNING) << "Warning: MF failed to shutdown";
  }
  CoUninitialize();
}
+
// Creates a Media Foundation sample with one IMFMediaBuffer containing a copy
// of the given Annex B stream data.
// |min_size| is the minimum buffer size the decoder may require for input;
// the allocated buffer is max(|min_size|, |size|).
// |timestamp| and |duration| are in 100-ns units; provide 0 when unknown.
// NOTE(review): because of the |> 0| guards below, a legitimate timestamp or
// duration of exactly 0 is silently not set on the sample.
// Returns an IMFSample* the caller must Release(), or NULL on any failure
// (intermediate COM objects are released by the scoped pointers).
IMFSample* GpuVideoDecoderMFT::CreateInputSample(uint8* data,
                                                 int32 size,
                                                 int64 timestamp,
                                                 int64 duration,
                                                 int32 min_size) {
  ScopedComPtr<IMFSample> sample;
  HRESULT hr = MFCreateSample(sample.Receive());
  if (FAILED(hr) || !sample.get()) {
    LOG(ERROR) << "Unable to create an empty sample";
    return NULL;
  }

  ScopedComPtr<IMFMediaBuffer> buffer;
  int32 buffer_length = min_size > size ? min_size : size;
  hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Unable to create an empty buffer";
    return NULL;
  }

  hr = sample->AddBuffer(buffer.get());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to add empty buffer to sample";
    return NULL;
  }

  if (duration > 0 && FAILED(sample->SetSampleDuration(duration))) {
    LOG(ERROR) << "Failed to set sample duration";
    return NULL;
  }

  if (timestamp > 0 && FAILED(sample->SetSampleTime(timestamp))) {
    LOG(ERROR) << "Failed to set sample time";
    return NULL;
  }

  // Copy the compressed data into the media buffer.
  DWORD max_length, current_length;
  uint8* buffer_data;
  hr = buffer->Lock(&buffer_data, &max_length, &current_length);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to lock buffer";
    return NULL;
  }
  CHECK_GE(static_cast<int>(max_length), size);
  memcpy(buffer_data, data, size);
  CHECK(SUCCEEDED(buffer->Unlock()));

  hr = buffer->SetCurrentLength(size);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to set current length to " << size;
    return NULL;
  }

  // Detach so the sample survives the ScopedComPtr going out of scope;
  // ownership transfers to the caller.
  return sample.Detach();
}
+
// Creates the D3D9 device and the DXVA device manager the MFT decodes into.
// |video_window| is only used as the device's focus/present window; actual
// output is read back from the decode surface, not presented.
bool GpuVideoDecoderMFT::CreateD3DDevManager(HWND video_window) {
  d3d9_.Attach(Direct3DCreate9(D3D_SDK_VERSION));
  if (d3d9_.get() == NULL) {
    LOG(ERROR) << "Failed to create D3D9";
    return false;
  }

  D3DPRESENT_PARAMETERS present_params = {0};
  present_params.BackBufferWidth = init_param_.width_;
  present_params.BackBufferHeight = init_param_.height_;
  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
  present_params.BackBufferCount = 1;
  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
  present_params.hDeviceWindow = video_window;
  present_params.Windowed = TRUE;
  present_params.Flags = D3DPRESENTFLAG_VIDEO;
  present_params.FullScreen_RefreshRateInHz = 0;
  present_params.PresentationInterval = 0;

  // D3DCREATE_HARDWARE_VERTEXPROCESSING specifies hardware vertex processing.
  // (Is it even needed for just video decoding?)
  HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,
                                   D3DDEVTYPE_HAL,
                                   video_window,
                                   D3DCREATE_HARDWARE_VERTEXPROCESSING,
                                   &present_params,
                                   device_.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to create D3D Device";
    return false;
  }

  UINT dev_manager_reset_token = 0;
  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
                                         device_manager_.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Couldn't create D3D Device manager";
    return false;
  }

  // Hand the device to the manager; the reset token pairs the two.
  hr = device_manager_->ResetDevice(device_.get(),
                                    dev_manager_reset_token);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to set device to device manager";
    return false;
  }

  return true;
}
+
// Runs the full initialization sequence: COM/MF startup, D3D/DXVA device
// manager creation (using the desktop window as a stand-in HWND), decoder
// creation and media-type negotiation, then stream-info capture. Finally
// notifies the MFT that the stream is starting. Stops at the first failure.
bool GpuVideoDecoderMFT::InitMediaFoundation() {
  if (!StartupComLibraries())
    return false;

  if (!CreateD3DDevManager(GetDesktopWindow()))
    return false;

  if (!InitDecoder())
    return false;

  if (!GetStreamsInfoAndBufferReqs())
    return false;

  return SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM);
}
+
// Instantiates the Microsoft H.264 decoder MFT, verifies it is D3D-aware,
// attaches the DXVA device manager, and negotiates input/output media types.
bool GpuVideoDecoderMFT::InitDecoder() {
  // TODO(jiesun): use MFEnum to get decoder CLSID.
  HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
                                NULL,
                                CLSCTX_INPROC_SERVER,
                                __uuidof(IMFTransform),
                                reinterpret_cast<void**>(decoder_.Receive()));
  if (FAILED(hr) || !decoder_.get()) {
    LOG(ERROR) << "CoCreateInstance failed " << std::hex << std::showbase << hr;
    return false;
  }

  if (!CheckDecoderDxvaSupport())
    return false;

  // Give the decoder the D3D device manager so it can allocate DXVA
  // surfaces for output.
  hr = decoder_->ProcessMessage(
      MFT_MESSAGE_SET_D3D_MANAGER,
      reinterpret_cast<ULONG_PTR>(device_manager_.get()));
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to set D3D9 device to decoder";
    return false;
  }

  return SetDecoderMediaTypes();
}
+
// Returns true if the instantiated MFT advertises MF_SA_D3D_AWARE, i.e. it
// can accept a D3D device manager and decode into DXVA surfaces.
bool GpuVideoDecoderMFT::CheckDecoderDxvaSupport() {
  ScopedComPtr<IMFAttributes> attributes;
  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Unlock: Failed to get attributes, hr = "
               << std::hex << std::showbase << hr;
    return false;
  }

  UINT32 dxva;
  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
  if (FAILED(hr) || !dxva) {
    LOG(ERROR) << "Failed to get DXVA attr, hr = "
               << std::hex << std::showbase << hr
               << "this might not be the right decoder.";
    return false;
  }
  return true;
}
+
// Negotiates both directions: H.264 in, NV12 out.
bool GpuVideoDecoderMFT::SetDecoderMediaTypes() {
  return SetDecoderInputMediaType() &&
         SetDecoderOutputMediaType(MFVideoFormat_NV12);
}
+
// Builds a minimal input media type (video/H.264) and hands it to the MFT.
// Width/height/etc. are left for the decoder to derive from the bitstream.
bool GpuVideoDecoderMFT::SetDecoderInputMediaType() {
  ScopedComPtr<IMFMediaType> media_type;
  HRESULT hr = MFCreateMediaType(media_type.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to create empty media type object";
    return false;
  }

  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
  if (FAILED(hr)) {
    LOG(ERROR) << "SetGUID for major type failed";
    return false;
  }

  hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
  if (FAILED(hr)) {
    LOG(ERROR) << "SetGUID for subtype failed";
    return false;
  }

  hr = decoder_->SetInputType(0, media_type.get(), 0);  // No flags
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to set decoder's input type";
    return false;
  }

  return true;
}
+
+bool GpuVideoDecoderMFT::SetDecoderOutputMediaType(const GUID subtype) {
+ DWORD i = 0;
+ IMFMediaType* out_media_type;
+ bool found = false;
+ while (SUCCEEDED(decoder_->GetOutputAvailableType(0, i, &out_media_type))) {
+ GUID out_subtype;
+ HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to GetGUID() on GetOutputAvailableType() " << i;
+ out_media_type->Release();
+ continue;
+ }
+ if (out_subtype == subtype) {
+ hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to SetOutputType to |subtype| or obtain "
+ << "width/height/stride " << std::hex << hr;
+ } else {
+ out_media_type->Release();
+ return true;
+ }
+ }
+ i++;
+ out_media_type->Release();
+ }
+ return false;
+}
+
// Sends a parameterless control message (stream start/end, flush, ...) to
// the MFT. Returns true on success.
bool GpuVideoDecoderMFT::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
  HRESULT hr = decoder_->ProcessMessage(msg, NULL);
  return SUCCEEDED(hr);
}
+
// Prints out info about the input/output streams and records the minimum
// buffer sizes for input and output samples in |input_stream_info_| /
// |output_stream_info_|.
// The MFT will not allocate buffers for either input or output, so we have
// to do it ourselves and make sure they're the correct size.
// Exception: when DXVA is enabled, the decoder allocates the output sample.
bool GpuVideoDecoderMFT::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get input stream info";
    return false;
  }
  LOG(INFO) << "Input stream info: ";
  LOG(INFO) << "Max latency: " << input_stream_info_.hnsMaxLatency;

  // There should be three flags, one for requiring a whole frame be in a
  // single sample, one for requiring there be one buffer only in a single
  // sample, and one that specifies a fixed sample size. (as in cbSize)
  // NOTE(review): CHECK-ing against the hard-coded mask 0x7 is brittle; a
  // different decoder implementation/version could legitimately report
  // different flags and would crash the GPU process here.
  LOG(INFO) << "Flags: "
            << std::hex << std::showbase << input_stream_info_.dwFlags;
  CHECK_EQ(static_cast<int>(input_stream_info_.dwFlags), 0x7);
  LOG(INFO) << "Min buffer size: " << input_stream_info_.cbSize;
  LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment;
  if (input_stream_info_.cbAlignment > 0) {
    LOG(WARNING) << "Warning: Decoder requires input to be aligned";
  }

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get output stream info";
    return false;
  }
  LOG(INFO) << "Output stream info: ";

  // The flags here should be the same and mean the same thing, except when
  // DXVA is enabled, there is an extra 0x100 flag meaning decoder will
  // allocate its own sample.
  CHECK_EQ(static_cast<int>(output_stream_info_.dwFlags), 0x107);
  LOG(INFO) << "Min buffer size: " << output_stream_info_.cbSize;
  LOG(INFO) << "Alignment: " << output_stream_info_.cbAlignment;
  if (output_stream_info_.cbAlignment > 0) {
    LOG(WARNING) << "Warning: Decoder requires output to be aligned";
  }

  return true;
}
+
// Initializes Media Foundation and allocates the two shared-memory transfer
// buffers (YV12-sized: width * height * 3 / 2 bytes) used to move compressed
// input and decoded output between the renderer and the GPU process, then
// shares their handles with the renderer.
// Always returns true: success/failure is reported to the host via
// |done_param->success_| through SendInitializeDone().
// NOTE(review): the LOG(ERROR) here (and in the other Do* methods) is used
// as a trace, not an actual error.
bool GpuVideoDecoderMFT::DoInitialize(
    const GpuVideoDecoderInitParam& param,
    GpuVideoDecoderInitDoneParam* done_param) {
  LOG(ERROR) << "GpuVideoDecoderMFT::DoInitialize";

  done_param->format_ =
      GpuVideoDecoderInitDoneParam::SurfaceFormat_YV12;
  done_param->surface_type_ =
      GpuVideoDecoderInitDoneParam::SurfaceTypeSystemMemory;
  done_param->input_buffer_handle_ = base::SharedMemory::NULLHandle();
  done_param->output_buffer_handle_ = base::SharedMemory::NULLHandle();

  // do/while(0) gives a single cleanup-free bailout point; |success_| stays
  // false on any early break.
  do {
    done_param->success_ = false;

    if (!InitMediaFoundation())
      break;

    // TODO(jiesun): Check the assumption of input size < original size.
    done_param->input_buffer_size_ = param.width_ * param.height_ * 3 / 2;
    input_transfer_buffer_.reset(new base::SharedMemory);
    if (!input_transfer_buffer_->Create(std::wstring(), false, false,
                                        done_param->input_buffer_size_))
      break;
    if (!input_transfer_buffer_->Map(done_param->input_buffer_size_))
      break;

    // TODO(jiesun): Allocate this according to the surface format.
    // The format actually could change during streaming, we need to
    // notify GpuVideoDecoderHost side when this happened and renegotiate
    // the transfer buffer.
    done_param->output_buffer_size_ = param.width_ * param.height_ * 3 / 2;
    output_transfer_buffer_.reset(new base::SharedMemory);
    if (!output_transfer_buffer_->Create(std::wstring(), false, false,
                                         done_param->output_buffer_size_))
      break;
    if (!output_transfer_buffer_->Map(done_param->output_buffer_size_))
      break;

    if (!input_transfer_buffer_->ShareToProcess(
            renderer_handle_,
            &done_param->input_buffer_handle_))
      break;
    if (!output_transfer_buffer_->ShareToProcess(
            renderer_handle_,
            &done_param->output_buffer_handle_))
      break;

    done_param->success_ = true;
  } while (0);

  SendInitializeDone(*done_param);
  return true;
}
+
// Acknowledges the uninitialize request.
// NOTE(review): no MF/COM teardown (ShutdownComLibraries, decoder release)
// happens here yet — TODO to confirm where those resources are released.
bool GpuVideoDecoderMFT::DoUninitialize() {
  LOG(ERROR) << "GpuVideoDecoderMFT::DoUninitialize";
  SendUninitializeDone();
  return true;
}
+
// Handles an EmptyThisBuffer request: wraps the data currently sitting in the
// shared input transfer buffer into an IMFSample and queues it for decode.
// A zero |buffer.size_| signals end-of-stream; in that case a NULL sample is
// queued as the EOS marker and the decoder enters kEosFlush.
void GpuVideoDecoderMFT::DoEmptyThisBuffer(
    const GpuVideoDecoderInputBufferParam& buffer) {
  LOG(ERROR) << "GpuVideoDecoderMFT::EmptyThisBuffer";

  CHECK(input_transfer_buffer_->memory());
  ScopedComPtr<IMFSample> sample;
  if (buffer.size_) {
    uint8* data = static_cast<uint8*>(input_transfer_buffer_->memory());
    // |timestamp_| is in microseconds; IMFSample wants 100-ns units.
    sample.Attach(CreateInputSample(data,
                                    buffer.size_,
                                    buffer.timestamp_*10,
                                    0LL,
                                    input_stream_info_.cbSize));
    CHECK(sample.get());
  } else {
    state_ = kEosFlush;
  }

  input_buffer_queue_.push_back(sample);
  // The transfer buffer is free again as soon as the copy above is done.
  SendEmptyBufferACK();

  // Drain as many pending output requests as the decoder can satisfy now.
  while (pending_request_)
    if (!DoDecode()) break;
}
+
// Handles a FillThisBuffer request: records one more outstanding output
// request and attempts to make decode progress.
// |frame| is currently unused — output goes through the shared transfer
// buffer instead.
void GpuVideoDecoderMFT::DoFillThisBuffer(
    const GpuVideoDecoderOutputBufferParam& frame) {
  LOG(ERROR) << "GpuVideoDecoderMFT::FillThisBuffer";

  pending_request_++;
  while (pending_request_)
    if (!DoDecode()) break;
}
+
// The host has consumed the output transfer buffer: mark it free, retire one
// outstanding request, and resume decoding if more requests are pending.
void GpuVideoDecoderMFT::DoFillThisBufferDoneACK() {
  output_transfer_buffer_busy_ = false;
  pending_request_--;
  while (pending_request_)
    if (!DoDecode()) break;
}
+
// Flushes the decoder: drops all queued input, resets the request/busy
// bookkeeping, tells the MFT to flush, then reports completion. The state
// returns to kNormal before SendFlushDone().
void GpuVideoDecoderMFT::DoFlush() {
  state_ = kFlushing;

  while (!input_buffer_queue_.empty())
    input_buffer_queue_.pop_front();
  pending_request_ = 0;
  // TODO(jiesun): this is wrong??
  output_transfer_buffer_busy_ = false;
  SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH);

  state_ = kNormal;
  SendFlushDone();
}
+
+bool GpuVideoDecoderMFT::DoDecode() {
+ if (state_ != kNormal && state_ != kEosFlush) return false;
+ if (output_transfer_buffer_busy_) return false;
+
+ MFT_OUTPUT_DATA_BUFFER output_data_buffer;
+ memset(&output_data_buffer, 0, sizeof(output_data_buffer));
+ output_data_buffer.dwStreamID = 0;
+
+ ScopedComPtr<IMFSample> output_sample;
+ DWORD status;
+ HRESULT hr = decoder_->ProcessOutput(0, // No flags
+ 1, // # of out streams to pull from
+ &output_data_buffer,
+ &status);
+
+ IMFCollection* events = output_data_buffer.pEvents;
+ if (events != NULL) {
+ LOG(INFO) << "Got events from ProcessOuput, but discarding";
+ events->Release();
+ }
+
+ if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+ hr = SetDecoderOutputMediaType(MFVideoFormat_NV12);
+ CHECK(SUCCEEDED(hr));
+ return true;
+ }
+ if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+ if (input_buffer_queue_.empty()) {
+ if (state_ == kEosFlush) {
+ GpuVideoDecoderOutputBufferParam output_param;
+ output_param.timestamp_ = 0;
+ output_param.duration_ = 0;
+ output_param.flags_ =
+ GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream;
+ output_transfer_buffer_busy_ = true;
+ SendFillBufferDone(output_param);
+ }
+ return false;
+ }
+ while (!input_buffer_queue_.empty()) {
+ ScopedComPtr<IMFSample> input_sample = input_buffer_queue_.front();
+ input_buffer_queue_.pop_front();
+
+ if (input_sample.get()) {
+ HRESULT hr = decoder_->ProcessInput(0, input_sample.get(), 0);
+ if (hr == MF_E_NOTACCEPTING) return true;
+ CHECK(SUCCEEDED(hr));
+ } else {
+ SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM);
+ }
+
+ // If we already received the input EOS, we do not need to issue
+ // more requests for new samples.
+ if (state_ != kEosFlush)
+ SendEmptyBufferDone();
+ }
+ return true;
+ }
+
+ CHECK(SUCCEEDED(hr));
+ output_sample.Attach(output_data_buffer.pSample);
+ CHECK(output_sample.get());
+
+ int64 timestamp, duration;
+ output_sample->GetSampleTime(&timestamp);
+ output_sample->GetSampleDuration(&duration);
+
+ // The duration and timestamps are in 100-ns units, so divide by 10
+ // to convert to microseconds.
+ timestamp /= 10;
+ duration /= 10;
+
+ // Sanity checks for checking if there is really something in the sample.
+ DWORD buf_count;
+ hr = output_sample->GetBufferCount(&buf_count);
+ CHECK(SUCCEEDED(hr) && buf_count == 1);
+
+ ScopedComPtr<IMFMediaBuffer> output_buffer;
+ hr = output_sample->GetBufferByIndex(0, output_buffer.Receive());
+ CHECK(SUCCEEDED(hr));
+
+ ScopedComPtr<IDirect3DSurface9> surface;
+ hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
+ IID_PPV_ARGS(surface.Receive()));
+ CHECK(SUCCEEDED(hr));
+
+
+ // NV12 to YV12
+ D3DLOCKED_RECT d3dlocked_rect;
+ RECT rect = {0, 0, init_param_.width_, init_param_.height_};
+ hr = surface->LockRect(&d3dlocked_rect, &rect, 0);
+
+ if (SUCCEEDED(hr)) {
+ D3DSURFACE_DESC desc;
+ hr = surface->GetDesc(&desc);
+ CHECK(SUCCEEDED(hr));
+
+ uint32 src_stride = d3dlocked_rect.Pitch;
+ uint32 dst_stride = init_param_.width_;
+ uint8* src_y = static_cast<uint8*>(d3dlocked_rect.pBits);
+ uint8* src_uv = src_y + src_stride * desc.Height;
+ uint8* dst_y = static_cast<uint8*>(output_transfer_buffer_->memory());
+ uint8* dst_u = dst_y + dst_stride * init_param_.height_;
+ uint8* dst_v = dst_u + dst_stride * init_param_.height_ / 4;
+
+ for ( int y = 0 ; y < init_param_.height_; ++y ) {
+ for ( int x = 0 ; x < init_param_.width_ ; ++x ) {
+ dst_y[x] = src_y[x];
+ if (!(y & 1)) {
+ if (x & 1)
+ dst_v[x>>1] = src_uv[x];
+ else
+ dst_u[x>>1] = src_uv[x];
+ }
+ }
+ dst_y += dst_stride;
+ src_y += src_stride;
+ if (!(y & 1)) {
+ src_uv += src_stride;
+ dst_v += dst_stride >> 1;
+ dst_u += dst_stride >> 1;
+ }
+ }
+ hr = surface->UnlockRect();
+ CHECK(SUCCEEDED(hr));
+ }
+
+ GpuVideoDecoderOutputBufferParam output_param;
+ output_param.timestamp_ = timestamp;
+ output_param.duration_ = duration;
+ output_param.flags_ = 0;
+ output_transfer_buffer_busy_ = true;
+ SendFillBufferDone(output_param);
+ return true;
+}
+
+#endif
+
diff --git a/chrome/gpu/gpu_video_decoder_mft.h b/chrome/gpu/gpu_video_decoder_mft.h
new file mode 100644
index 0000000..3644617
--- /dev/null
+++ b/chrome/gpu/gpu_video_decoder_mft.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_GPU_GPU_VIDEO_DECODER_MFT_H_
+#define CHROME_GPU_GPU_VIDEO_DECODER_MFT_H_
+
+#include "build/build_config.h" // For OS_WIN.
+
+#if defined(OS_WIN)
+
+#include <d3d9.h>
+#include <dxva2api.h>
+#include <evr.h>
+#include <initguid.h>
+#include <mfapi.h>
+#include <mferror.h>
+#include <mfidl.h>
+#include <shlwapi.h>
+#include <wmcodecdsp.h>
+
+#include <deque>
+
+#include "base/scoped_comptr_win.h"
+#include "chrome/gpu/gpu_video_decoder.h"
+
// Windows-only GpuVideoDecoder implementation backed by the Media Foundation
// H.264 decoder MFT with DXVA (D3D9) output surfaces. Instances are created
// by GpuVideoService (the constructor is private; see friend below).
class GpuVideoDecoderMFT : public GpuVideoDecoder {
 public:
  // GpuVideoDecoder implementation.
  virtual bool DoInitialize(const GpuVideoDecoderInitParam& init_param,
                            GpuVideoDecoderInitDoneParam* done_param);
  virtual bool DoUninitialize();
  virtual void DoFlush();
  virtual void DoEmptyThisBuffer(const GpuVideoDecoderInputBufferParam& buffer);
  virtual void DoFillThisBuffer(const GpuVideoDecoderOutputBufferParam& frame);
  virtual void DoFillThisBufferDoneACK();

 private:
  GpuVideoDecoderMFT(const GpuVideoDecoderInfoParam* param,
                     GpuChannel* channel_,
                     base::ProcessHandle handle);

  friend class GpuVideoService;

  // TODO(jiesun): Find a way to move all these to GpuVideoService..
  static bool StartupComLibraries();
  static void ShutdownComLibraries();
  bool CreateD3DDevManager(HWND video_window);

  // helper.
  bool InitMediaFoundation();
  bool InitDecoder();
  bool CheckDecoderDxvaSupport();

  bool SetDecoderMediaTypes();
  bool SetDecoderInputMediaType();
  bool SetDecoderOutputMediaType(const GUID subtype);
  bool SendMFTMessage(MFT_MESSAGE_TYPE msg);
  bool GetStreamsInfoAndBufferReqs();

  // Help function to create IMFSample* out of input buffer.
  // data are copied into IMFSample's own IMFMediaBuffer.
  // Client should Release() the IMFSample*.
  static IMFSample* CreateInputSample(uint8* data,
                                      int32 size,
                                      int64 timestamp,
                                      int64 duration,
                                      int32 min_size);

  // Pulls/pushes one unit of work through the MFT; see the .cc for details.
  bool DoDecode();

  // D3D/DXVA plumbing the MFT decodes into.
  ScopedComPtr<IDirect3D9> d3d9_;
  ScopedComPtr<IDirect3DDevice9> device_;
  ScopedComPtr<IDirect3DDeviceManager9> device_manager_;
  ScopedComPtr<IMFTransform> decoder_;

  MFT_INPUT_STREAM_INFO input_stream_info_;
  MFT_OUTPUT_STREAM_INFO output_stream_info_;

  // Queued compressed input; a NULL entry marks end-of-stream.
  std::deque<ScopedComPtr<IMFSample> > input_buffer_queue_;
  // True while the shared output buffer is owned by the renderer side.
  bool output_transfer_buffer_busy_;

  typedef enum {
    kNormal,     // normal playing state.
    kFlushing,   // upon received Flush(), before FlushDone()
    kEosFlush,   // upon input EOS received.
    kStopped,    // upon output EOS received.
  } State;
  State state_;

  // Number of outstanding FillThisBuffer requests from the host.
  int32 pending_request_;

  DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoderMFT);
};
+
+#endif
+
+#endif // CHROME_GPU_GPU_VIDEO_DECODER_MFT_H_
+
diff --git a/chrome/gpu/gpu_video_service.cc b/chrome/gpu/gpu_video_service.cc
new file mode 100644
index 0000000..45b7063
--- /dev/null
+++ b/chrome/gpu/gpu_video_service.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/common/gpu_messages.h"
+#include "chrome/gpu/gpu_channel.h"
+#include "chrome/gpu/gpu_video_decoder_mft.h"
+#include "chrome/gpu/gpu_video_service.h"
+
// Decoder ids start at 1 (GetNextAvailableDecoderID pre-increments).
GpuVideoService::GpuVideoService() : next_available_decoder_id_(0) {
  // TODO(jiesun): move this time consuming stuff out of here.
  IntializeGpuVideoService();
}
GpuVideoService::~GpuVideoService() {
  // TODO(jiesun): move this time consuming stuff out of here.
  UnintializeGpuVideoService();
}
+
// IPC::Channel::Listener; trace-only for now.
void GpuVideoService::OnChannelConnected(int32 peer_pid) {
  LOG(ERROR) << "GpuVideoService::OnChannelConnected";
}
+
// IPC::Channel::Listener; trace-only for now.
void GpuVideoService::OnChannelError() {
  LOG(ERROR) << "GpuVideoService::OnChannelError";
}
+
// No service-level messages are routed here yet: the dispatch map below is
// compiled out with #if 0, so every message is silently dropped.
void GpuVideoService::OnMessageReceived(const IPC::Message& msg) {
#if 0
  IPC_BEGIN_MESSAGE_MAP(GpuVideoService, msg)
    IPC_MESSAGE_UNHANDLED_ERROR()
  IPC_END_MESSAGE_MAP()
#endif
}
+
// Placeholder for service-wide startup work.
// NOTE(review): the name is misspelled ("Intialize"); kept because it must
// match the declaration in gpu_video_service.h.
bool GpuVideoService::IntializeGpuVideoService() {
  return true;
}
+
// Placeholder for service-wide teardown work (name misspelling mirrors the
// header declaration).
bool GpuVideoService::UnintializeGpuVideoService() {
  return true;
}
+
// Creates a platform hardware decoder for |channel|, assigns it a new
// decoder id (written back into |param|), records it in |decoder_map_|, and
// registers it with |router| under the route id carried in |param|.
// Returns false on platforms with no hardware decoder implementation; only
// the Windows Media Foundation decoder exists today.
bool GpuVideoService::CreateVideoDecoder(
    GpuChannel* channel,
    MessageRouter* router,
    GpuVideoDecoderInfoParam* param) {
  // TODO(jiesun): find a better way to determine which GpuVideoDecoder
  // to return on current platform.
#if defined(OS_WIN)
  GpuVideoDecoderInfo decoder_info;
  int32 decoder_id = GetNextAvailableDecoderID();
  param->decoder_id_ = decoder_id;
  base::ProcessHandle handle = channel->renderer_handle();
  decoder_info.decoder_ = new GpuVideoDecoderMFT(param, channel, handle);
  decoder_info.channel_ = channel;
  decoder_info.param = *param;
  decoder_map_[decoder_id] = decoder_info;
  router->AddRoute(param->decoder_route_id_, decoder_info.decoder_);
  return true;
#else
  return false;
#endif
}
+
+void GpuVideoService::DestroyVideoDecoder(
+ MessageRouter* router,
+ int32 decoder_id) {
+ int32 route_id = decoder_map_[decoder_id].param.decoder_route_id_;
+ router->RemoveRoute(route_id);
+ decoder_map_.erase(decoder_id);
+}
+
// Returns a fresh, never-zero decoder id (monotonically increasing).
int32 GpuVideoService::GetNextAvailableDecoderID() {
  return ++next_available_decoder_id_;
}
+
diff --git a/chrome/gpu/gpu_video_service.h b/chrome/gpu/gpu_video_service.h
new file mode 100644
index 0000000..3172031
--- /dev/null
+++ b/chrome/gpu/gpu_video_service.h
@@ -0,0 +1,56 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_GPU_GPU_VIDEO_SERVICE_H_
+#define CHROME_GPU_GPU_VIDEO_SERVICE_H_
+
+#include <map>
+
+#include "base/scoped_ptr.h"
+#include "base/ref_counted.h"
+#include "base/singleton.h"
+#include "chrome/gpu/gpu_video_decoder.h"
+#include "ipc/ipc_channel.h"
+
+class GpuChannel;
+
// Singleton living in the GPU process that creates and tracks hardware video
// decoders on behalf of renderer processes. Each decoder gets its own route
// on the GPU channel's message router.
class GpuVideoService : public IPC::Channel::Listener,
                        public Singleton<GpuVideoService> {
 public:
  // IPC::Channel::Listener.
  virtual void OnChannelConnected(int32 peer_pid);
  virtual void OnChannelError();
  virtual void OnMessageReceived(const IPC::Message& message);

  // Creates a decoder for |channel|; fills |param| with the assigned
  // decoder id and registers the decoder on |router|.
  bool CreateVideoDecoder(GpuChannel* channel,
                          MessageRouter* router,
                          GpuVideoDecoderInfoParam* param);
  void DestroyVideoDecoder(MessageRouter* router,
                           int32 decoder_id);

 private:
  // Book-keeping record for one live decoder.
  struct GpuVideoDecoderInfo {
    scoped_refptr<GpuVideoDecoder> decoder_;
    GpuChannel* channel_;
    GpuVideoDecoderInfoParam param;
  };

  GpuVideoService();
  virtual ~GpuVideoService();

  std::map<int32, GpuVideoDecoderInfo> decoder_map_;
  int32 next_available_decoder_id_;

  // Specialize video service on different platform will override.
  virtual bool IntializeGpuVideoService();
  virtual bool UnintializeGpuVideoService();

  int32 GetNextAvailableDecoderID();

  friend struct DefaultSingletonTraits<GpuVideoService>;
  DISALLOW_COPY_AND_ASSIGN(GpuVideoService);
};
+
+#endif // CHROME_GPU_GPU_VIDEO_SERVICE_H_
+
diff --git a/chrome/renderer/DEPS b/chrome/renderer/DEPS
index b05975c..dc83927 100644
--- a/chrome/renderer/DEPS
+++ b/chrome/renderer/DEPS
@@ -4,6 +4,7 @@ include_rules = [
"+grit", # For generated headers
"+media/audio",
"+media/base",
+ "+media/ffmpeg",
"+media/filters",
"+sandbox/src",
"+skia/include",
diff --git a/chrome/renderer/gpu_channel_host.cc b/chrome/renderer/gpu_channel_host.cc
index 70a5224..bb98148 100644
--- a/chrome/renderer/gpu_channel_host.cc
+++ b/chrome/renderer/gpu_channel_host.cc
@@ -7,6 +7,7 @@
#include "chrome/common/child_process.h"
#include "chrome/common/gpu_messages.h"
#include "chrome/renderer/command_buffer_proxy.h"
+#include "chrome/renderer/gpu_video_service_host.h"
GpuChannelHost::GpuChannelHost() : state_(UNCONNECTED) {
}
@@ -35,6 +36,9 @@ void GpuChannelHost::OnMessageReceived(const IPC::Message& message) {
}
void GpuChannelHost::OnChannelConnected(int32 peer_pid) {
+ GpuVideoServiceHost::get()->OnGpuChannelConnected(this,
+ &router_,
+ channel_.get());
}
void GpuChannelHost::OnChannelError() {
diff --git a/chrome/renderer/gpu_channel_host.h b/chrome/renderer/gpu_channel_host.h
index 6c33acb..e35d40d 100644
--- a/chrome/renderer/gpu_channel_host.h
+++ b/chrome/renderer/gpu_channel_host.h
@@ -61,7 +61,6 @@ class GpuChannelHost : public IPC::Channel::Listener,
const gfx::Size& size,
uint32 parent_texture_id);
-
// Destroy a command buffer created by this channel.
void DestroyCommandBuffer(CommandBufferProxy* command_buffer);
diff --git a/chrome/renderer/gpu_video_decoder_host.cc b/chrome/renderer/gpu_video_decoder_host.cc
new file mode 100644
index 0000000..5e0f727
--- /dev/null
+++ b/chrome/renderer/gpu_video_decoder_host.cc
@@ -0,0 +1,199 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/renderer/gpu_video_decoder_host.h"
+
+#include "chrome/common/gpu_messages.h"
+#include "chrome/renderer/gpu_video_service_host.h"
+#include "chrome/renderer/render_thread.h"
+
// Called when the GPU channel goes down: drop our pointer to the channel so
// no further IPCs are attempted.
// NOTE(review): release() relinquishes the pointer without destroying it —
// confirm |channel_host_|'s ownership model; if this host holds the only
// reference this leaks.
void GpuVideoDecoderHost::OnChannelError() {
  channel_host_.release();
}
+
// Routes replies from the GPU-side decoder to the matching handler below.
// Unhandled messages are reported as errors.
void GpuVideoDecoderHost::OnMessageReceived(const IPC::Message& msg) {
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecoderHost, msg)
    IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_InitializeACK,
                        OnInitializeDone)
    IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_DestroyACK,
                        OnUninitializeDone)
    IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_FlushACK,
                        OnFlushDone)
    IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_EmptyThisBufferACK,
                        OnEmptyThisBufferACK)
    IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_EmptyThisBufferDone,
                        OnEmptyThisBufferDone)
    IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_FillThisBufferDone,
                        OnFillThisBufferDone)
    IPC_MESSAGE_UNHANDLED_ERROR()
  IPC_END_MESSAGE_MAP()
}
+
// Asynchronously initializes the GPU-side decoder. Returns false only when
// the IPC send fails; the real result arrives later via OnInitializeDone().
bool GpuVideoDecoderHost::Initialize(const GpuVideoDecoderInitParam& param) {
  DCHECK_EQ(state_, kStateUninitialized);

  init_param_ = param;
  if (!channel_host_->Send(
      new GpuVideoDecoderMsg_Initialize(route_id(), param))) {
    LOG(ERROR) << "GpuVideoDecoderMsg_Initialize failed";
    return false;
  }
  return true;
}
+
// Asynchronously tears down the GPU-side decoder; completion arrives via
// OnUninitializeDone(). Returns false only when the IPC send fails.
bool GpuVideoDecoderHost::Uninitialize() {
  if (!channel_host_->Send(new GpuVideoDecoderMsg_Destroy(route_id()))) {
    LOG(ERROR) << "GpuVideoDecoderMsg_Destroy failed";
    return false;
  }
  return true;
}
+
// Queues a compressed input buffer and forwards it to the GPU process when
// the single shared input transfer buffer is free. Buffers arriving outside
// kStateNormal are silently dropped.
void GpuVideoDecoderHost::EmptyThisBuffer(scoped_refptr<Buffer> buffer) {
  DCHECK_NE(state_, kStateUninitialized);
  DCHECK_NE(state_, kStateFlushing);

  // We never own input buffers, therefore when client in flush state, it
  // never call us with EmptyThisBuffer.
  if (state_ != kStateNormal)
    return;

  input_buffer_queue_.push_back(buffer);
  SendInputBufferToGpu();
}
+
+void GpuVideoDecoderHost::FillThisBuffer(scoped_refptr<VideoFrame> frame) {
+ DCHECK_NE(state_, kStateUninitialized);
+
+ // Depends on who provides buffer. client could return buffer to
+ // us while flushing.
+ if (state_ == kStateError)
+ return;
+
+ GpuVideoDecoderOutputBufferParam param;
+ if (!channel_host_->Send(
+ new GpuVideoDecoderMsg_FillThisBuffer(route_id(), param))) {
+ LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBuffer failed";
+ }
+}
+
// Enters the flushing state, asks the GPU-side decoder to flush, and drops
// any input not yet sent. Completion arrives via OnFlushDone(), which
// restores kStateNormal. Returns false only when the IPC send fails.
bool GpuVideoDecoderHost::Flush() {
  state_ = kStateFlushing;
  if (!channel_host_->Send(new GpuVideoDecoderMsg_Flush(route_id()))) {
    LOG(ERROR) << "GpuVideoDecoderMsg_Flush failed";
    return false;
  }
  input_buffer_queue_.clear();
  // TODO(jiesun): because GpuVideoDeocder/GpuVideoDecoder are asynchronously.
  // We need a way to make flush logic more clear. but I think ring buffer
  // should make the busy flag obsolete, therefore I will leave it for now.
  input_buffer_busy_ = false;
  return true;
}
+
// Handles the GPU process's initialization reply: maps the shared input and
// output transfer buffers into this process, moves to kStateNormal (or
// kStateError on any failure), and notifies the event handler.
void GpuVideoDecoderHost::OnInitializeDone(
    const GpuVideoDecoderInitDoneParam& param) {
  done_param_ = param;
  bool success = false;

  // do/while(0) gives a single bailout point; |success| stays false on any
  // early break.
  do {
    if (!param.success_)
      break;

    if (!base::SharedMemory::IsHandleValid(param.input_buffer_handle_))
      break;
    input_transfer_buffer_.reset(
        new base::SharedMemory(param.input_buffer_handle_, false));
    if (!input_transfer_buffer_->Map(param.input_buffer_size_))
      break;

    if (!base::SharedMemory::IsHandleValid(param.output_buffer_handle_))
      break;
    output_transfer_buffer_.reset(
        new base::SharedMemory(param.output_buffer_handle_, false));
    if (!output_transfer_buffer_->Map(param.output_buffer_size_))
      break;

    success = true;
  } while (0);

  state_ = success ? kStateNormal : kStateError;
  event_handler_->OnInitializeDone(success, param);
}
+
// Handles the GPU process's destroy reply: unmaps the transfer buffers and
// notifies the event handler.
void GpuVideoDecoderHost::OnUninitializeDone() {
  input_transfer_buffer_.reset();
  output_transfer_buffer_.reset();

  event_handler_->OnUninitializeDone();
}
+
// Flush completed on the GPU side; leave kStateFlushing and tell the client.
void GpuVideoDecoderHost::OnFlushDone() {
  state_ = kStateNormal;
  event_handler_->OnFlushDone();
}
+
// The GPU side consumed an input buffer. A null Buffer is forwarded because
// input buffers are not owned or tracked by this host.
void GpuVideoDecoderHost::OnEmptyThisBufferDone() {
  scoped_refptr<Buffer> buffer;
  event_handler_->OnEmptyBufferDone(buffer);
}
+
// A decoded frame (or EOS marker) is ready in the shared output buffer.
// Builds a YV12 VideoFrame, copies the three planes out of the transfer
// buffer (assumed tightly packed: Y then two quarter-size chroma planes),
// hands the frame to the client, then ACKs so the GPU side can reuse the
// transfer buffer.
// NOTE(review): confirm the chroma plane order in the transfer buffer
// matches VideoFrame's YV12 expectation for data(1)/data(2).
void GpuVideoDecoderHost::OnFillThisBufferDone(
    const GpuVideoDecoderOutputBufferParam& param) {
  scoped_refptr<VideoFrame> frame;

  if (param.flags_ & GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream) {
    VideoFrame::CreateEmptyFrame(&frame);
  } else {
    VideoFrame::CreateFrame(VideoFrame::YV12,
                            init_param_.width_,
                            init_param_.height_,
                            base::TimeDelta::FromMicroseconds(param.timestamp_),
                            base::TimeDelta::FromMicroseconds(param.duration_),
                            &frame);

    uint8* src = static_cast<uint8*>(output_transfer_buffer_->memory());
    uint8* data0 = frame->data(0);
    uint8* data1 = frame->data(1);
    uint8* data2 = frame->data(2);
    int32 size = init_param_.width_ * init_param_.height_;
    memcpy(data0, src, size);
    memcpy(data1, src + size, size / 4);
    memcpy(data2, src + size + size / 4, size / 4);
  }

  event_handler_->OnFillBufferDone(frame);
  if (!channel_host_->Send(
      new GpuVideoDecoderMsg_FillThisBufferDoneACK(route_id()))) {
    LOG(ERROR) << "GpuVideoDecoderMsg_FillThisBufferDoneACK failed";
  }
}
+
// The input transfer buffer is free again; send the next queued buffer.
void GpuVideoDecoderHost::OnEmptyThisBufferACK() {
  input_buffer_busy_ = false;
  SendInputBufferToGpu();
}
+
// Copies the front queued input buffer into the single shared input transfer
// buffer and notifies the GPU process. Only one buffer may be in flight at a
// time; |input_buffer_busy_| is cleared by OnEmptyThisBufferACK().
void GpuVideoDecoderHost::SendInputBufferToGpu() {
  if (input_buffer_busy_) return;
  if (input_buffer_queue_.empty()) return;

  input_buffer_busy_ = true;

  scoped_refptr<Buffer> buffer;
  buffer = input_buffer_queue_.front();
  input_buffer_queue_.pop_front();

  // Send input data to GPU process.
  GpuVideoDecoderInputBufferParam param;
  param.offset_ = 0;
  param.size_ = buffer->GetDataSize();
  param.timestamp_ = buffer->GetTimestamp().InMicroseconds();
  memcpy(input_transfer_buffer_->memory(), buffer->GetData(), param.size_);
  if (!channel_host_->Send(
      new GpuVideoDecoderMsg_EmptyThisBuffer(route_id(), param))) {
    LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBuffer failed";
  }
}
+
diff --git a/chrome/renderer/gpu_video_decoder_host.h b/chrome/renderer/gpu_video_decoder_host.h
new file mode 100644
index 0000000..2bd2f5b
--- /dev/null
+++ b/chrome/renderer/gpu_video_decoder_host.h
@@ -0,0 +1,126 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_RENDERER_GPU_VIDEO_DECODER_HOST_H_
+#define CHROME_RENDERER_GPU_VIDEO_DECODER_HOST_H_
+
+#include <deque>
+
+#include "base/singleton.h"
+#include "chrome/common/gpu_video_common.h"
+#include "chrome/renderer/gpu_channel_host.h"
+#include "ipc/ipc_channel_proxy.h"
+#include "media/base/buffers.h"
+#include "media/base/video_frame.h"
+
+using media::VideoFrame;
+using media::Buffer;
+
+class GpuVideoServiceHost;
+
+// Renderer-side proxy for a hardware video decoder living in the GPU
+// process.  There is one GpuVideoDecoderHost per remote GpuVideoDecoder
+// (1:1 mapping); it forwards decode requests over the GPU channel and
+// relays completion events back to its EventHandler.
+class GpuVideoDecoderHost
+    : public base::RefCountedThreadSafe<GpuVideoDecoderHost>,
+      public IPC::Channel::Listener {
+ public:
+  // Interface the client implements to receive asynchronous notifications
+  // about decoder state changes and buffer completions.
+  // NOTE(review): no virtual destructor; safe only while implementations
+  // are never deleted through this interface -- confirm.
+  class EventHandler {
+   public:
+    virtual void OnInitializeDone(
+        bool success,
+        const GpuVideoDecoderInitDoneParam& param) = 0;
+    virtual void OnUninitializeDone() = 0;
+    virtual void OnFlushDone() = 0;
+    virtual void OnEmptyBufferDone(scoped_refptr<Buffer> buffer) = 0;
+    virtual void OnFillBufferDone(scoped_refptr<VideoFrame> frame) = 0;
+    virtual void OnDeviceError() = 0;
+  };
+
+  typedef enum {
+    kStateUninitialized,
+    kStateNormal,
+    kStateError,
+    kStateFlushing,
+  } GpuVideoDecoderHostState;
+
+  // IPC::Channel::Listener.
+  virtual void OnChannelConnected(int32 peer_pid) {}
+  virtual void OnChannelError();
+  virtual void OnMessageReceived(const IPC::Message& message);
+
+  bool Initialize(const GpuVideoDecoderInitParam& param);
+  bool Uninitialize();
+  void EmptyThisBuffer(scoped_refptr<Buffer> buffer);
+  void FillThisBuffer(scoped_refptr<VideoFrame> frame);
+  bool Flush();
+
+  // Identifiers assigned by the GPU process when the decoder was created.
+  int32 decoder_id() { return decoder_info_.decoder_id_; }
+  int32 route_id() { return decoder_info_.decoder_route_id_; }
+  int32 my_route_id() { return decoder_info_.decoder_host_route_id_; }
+
+  virtual ~GpuVideoDecoderHost() {}
+
+ private:
+  // Instances are created only by GpuVideoServiceHost::CreateVideoDecoder().
+  GpuVideoDecoderHost(GpuVideoServiceHost* service_host,
+                      GpuChannelHost* channel_host,
+                      EventHandler* event_handler,
+                      GpuVideoDecoderInfoParam decoder_info)
+      : gpu_video_service_host_(service_host),
+        channel_host_(channel_host),
+        event_handler_(event_handler),
+        decoder_info_(decoder_info),
+        buffer_id_serial_(0),
+        state_(kStateUninitialized),
+        input_buffer_busy_(false) {}
+  friend class GpuVideoServiceHost;
+
+  // Input message handler.
+  void OnInitializeDone(const GpuVideoDecoderInitDoneParam& param);
+  void OnUninitializeDone();
+  void OnFlushDone();
+  void OnEmptyThisBufferDone();
+  void OnFillThisBufferDone(const GpuVideoDecoderOutputBufferParam& param);
+  void OnEmptyThisBufferACK();
+
+  // Helper function.
+  void SendInputBufferToGpu();
+
+  // We expect the GpuVideoServiceHost to remain available during our
+  // life span; not owned.
+  GpuVideoServiceHost* gpu_video_service_host_;
+
+  scoped_refptr<GpuChannelHost> channel_host_;
+
+  // We expect our client to remain available during our life span; not owned.
+  EventHandler* event_handler_;
+
+  // Globally identify this decoder in the GPU process.
+  GpuVideoDecoderInfoParam decoder_info_;
+
+  // Input buffer id serial number generator.
+  int32 buffer_id_serial_;
+
+  // Hold information about GpuVideoDecoder configuration.
+  GpuVideoDecoderInitParam init_param_;
+
+  // Hold information about output surface format, etc.
+  GpuVideoDecoderInitDoneParam done_param_;
+
+  // Current state of video decoder.
+  GpuVideoDecoderHostState state_;
+
+  // We are not able to push all received buffer to gpu process at once.
+  std::deque<scoped_refptr<Buffer> > input_buffer_queue_;
+
+  // Currently we do not use ring buffer in input buffer, therefore before
+  // GPU process had finished access it, we should not touch it.
+  bool input_buffer_busy_;
+
+  // Transfer buffers for both input and output.
+  // TODO(jiesun): remove output buffer when hardware composition is ready.
+  scoped_ptr<base::SharedMemory> input_transfer_buffer_;
+  scoped_ptr<base::SharedMemory> output_transfer_buffer_;
+
+  DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoderHost);
+};
+
+#endif // CHROME_RENDERER_GPU_VIDEO_DECODER_HOST_H_
+
diff --git a/chrome/renderer/gpu_video_service_host.cc b/chrome/renderer/gpu_video_service_host.cc
new file mode 100644
index 0000000..de5e9a0
--- /dev/null
+++ b/chrome/renderer/gpu_video_service_host.cc
@@ -0,0 +1,91 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/renderer/gpu_video_service_host.h"
+
+#include "chrome/common/gpu_messages.h"
+#include "chrome/renderer/gpu_video_decoder_host.h"
+#include "chrome/renderer/render_thread.h"
+
+// Called when the GPU channel is lost.  Drop our reference to the channel so
+// subsequent CreateVideoDecoder()/DestroyVideoDecoder() calls bail out early,
+// and forget the router, which is owned by the channel machinery.
+void GpuVideoServiceHost::OnChannelError() {
+  LOG(ERROR) << "GpuVideoServiceHost::OnChannelError";
+  // Assign NULL rather than calling release(): scoped_refptr::release()
+  // detaches the pointer WITHOUT calling Release(), which would leak a
+  // reference to the GpuChannelHost.
+  channel_host_ = NULL;
+  router_ = NULL;
+}
+
+// No service-level messages are handled here yet: per-decoder messages are
+// dispatched by the router directly to the matching GpuVideoDecoderHost
+// (added via AddRoute in CreateVideoDecoder).  The previous "#if 0" message
+// map was dead code and has been removed; reintroduce a real message map
+// when the service grows its own messages.
+void GpuVideoServiceHost::OnMessageReceived(const IPC::Message& msg) {
+}
+
+// Creates a hardware decoder in the GPU process and a renderer-side proxy
+// for it.  Returns NULL when the GPU channel or the video service is not
+// available, or when the synchronous create message fails.  Must be called
+// on the render thread.
+scoped_refptr<GpuVideoDecoderHost> GpuVideoServiceHost::CreateVideoDecoder(
+    GpuVideoDecoderHost::EventHandler* event_handler) {
+  DCHECK(RenderThread::current());
+
+  if (!channel_host_.get() || !service_info_.service_available_)
+    return NULL;
+
+  GpuVideoDecoderInfoParam param;
+  if (!channel_host_->Send(new GpuChannelMsg_CreateVideoDecoder(&param))) {
+    LOG(ERROR) << "GpuChannelMsg_CreateVideoDecoder failed";
+    return NULL;
+  }
+
+  // Note: operator new never returns NULL here, so the old null-check on the
+  // freshly constructed host (and its compensating DestroyVideoDecoder
+  // message) was dead code and has been removed.
+  scoped_refptr<GpuVideoDecoderHost> gpu_video_decoder_host =
+      new GpuVideoDecoderHost(this, channel_host_, event_handler, param);
+
+  // Route decoder-host messages from the GPU process to the new proxy.
+  router_->AddRoute(gpu_video_decoder_host->my_route_id(),
+                    gpu_video_decoder_host.get());
+  return gpu_video_decoder_host;
+}
+
+// Tears down the GPU-process decoder paired with |gpu_video_decoder_host|
+// and removes its message route.  Must be called on the render thread.  The
+// host object itself is refcounted and dies when the last reference drops.
+void GpuVideoServiceHost::DestroyVideoDecoder(
+    scoped_refptr<GpuVideoDecoderHost> gpu_video_decoder_host) {
+  DCHECK(RenderThread::current());
+
+  // If the channel is already gone the GPU-side decoder died with it.
+  if (!channel_host_.get() || !service_info_.service_available_)
+    return;
+
+  DCHECK(gpu_video_decoder_host.get());
+
+  int32 decoder_id = gpu_video_decoder_host->decoder_id();
+  if (!channel_host_->Send(new GpuChannelMsg_DestroyVideoDecoder(decoder_id))) {
+    LOG(ERROR) << "GpuChannelMsg_DestroyVideoDecoder failed";
+  }
+
+  router_->RemoveRoute(gpu_video_decoder_host->my_route_id());
+}
+
+// Records the render thread's message loop; called once from
+// RenderThread::Init().  |message_loop| is not owned.
+void GpuVideoServiceHost::OnRendererThreadInit(MessageLoop* message_loop) {
+  message_loop_ = message_loop;
+}
+
+// Called when the renderer establishes its GPU channel.  Queries the GPU
+// process (synchronously) for the video service routing information and, if
+// the service is available, registers |this| to receive its messages.
+void GpuVideoServiceHost::OnGpuChannelConnected(
+    GpuChannelHost* channel_host,
+    MessageRouter* router,
+    IPC::SyncChannel* channel) {
+
+  channel_host_ = channel_host;
+  router_ = router;
+
+  // Get the routing_id of video service in GPU process.
+  service_info_.service_available_ = 0;
+  if (!channel_host_->Send(new GpuChannelMsg_GetVideoService(&service_info_))) {
+    LOG(ERROR) << "GpuChannelMsg_GetVideoService failed";
+  }
+
+  if (service_info_.service_available_)
+    router->AddRoute(service_info_.video_service_host_route_id_, this);
+}
+
diff --git a/chrome/renderer/gpu_video_service_host.h b/chrome/renderer/gpu_video_service_host.h
new file mode 100644
index 0000000..7b9fb65
--- /dev/null
+++ b/chrome/renderer/gpu_video_service_host.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_RENDERER_GPU_VIDEO_SERVICE_HOST_H_
+#define CHROME_RENDERER_GPU_VIDEO_SERVICE_HOST_H_
+
+#include <map>
+
+#include "base/singleton.h"
+#include "chrome/common/gpu_video_common.h"
+#include "chrome/renderer/gpu_channel_host.h"
+#include "chrome/renderer/gpu_video_decoder_host.h"
+#include "ipc/ipc_channel.h"
+#include "media/base/buffers.h"
+#include "media/base/video_frame.h"
+
+// Per-renderer-process singleton that proxies the GPU process's video
+// service.  It owns the connection bookkeeping and hands out
+// GpuVideoDecoderHost proxies for individual hardware decoders.
+class GpuVideoServiceHost : public IPC::Channel::Listener,
+                            public Singleton<GpuVideoServiceHost> {
+ public:
+  // IPC::Channel::Listener.
+  virtual void OnChannelConnected(int32 peer_pid) {}
+  virtual void OnChannelError();
+  virtual void OnMessageReceived(const IPC::Message& message);
+
+  // Called once from RenderThread::Init() / channel establishment.
+  void OnRendererThreadInit(MessageLoop* message_loop);
+  void OnGpuChannelConnected(GpuChannelHost* channel_host,
+                             MessageRouter* router,
+                             IPC::SyncChannel* channel);
+
+  // Called on the render thread; one service per renderer process.
+  // CreateVideoDecoder() returns NULL if the GPU channel or the video
+  // service is unavailable.
+  scoped_refptr<GpuVideoDecoderHost> CreateVideoDecoder(
+      GpuVideoDecoderHost::EventHandler* event_handler);
+  void DestroyVideoDecoder(scoped_refptr<GpuVideoDecoderHost>);
+
+ private:
+  // Initialize every pointer member: |router_| was previously left
+  // uninitialized, which risked a garbage read before the channel connected.
+  GpuVideoServiceHost()
+      : router_(NULL),
+        message_loop_(NULL) {
+    service_info_.service_available_ = 0;
+  }
+
+  scoped_refptr<GpuChannelHost> channel_host_;
+  MessageRouter* router_;  // Not owned; NULL until the channel connects.
+  GpuVideoServiceInfoParam service_info_;
+  MessageLoop* message_loop_;  // Message loop of render thread; not owned.
+
+  friend struct DefaultSingletonTraits<GpuVideoServiceHost>;
+  DISALLOW_COPY_AND_ASSIGN(GpuVideoServiceHost);
+};
+
+#endif // CHROME_RENDERER_GPU_VIDEO_SERVICE_HOST_H_
+
diff --git a/chrome/renderer/media/ipc_video_decoder.cc b/chrome/renderer/media/ipc_video_decoder.cc
new file mode 100644
index 0000000..9d47260
--- /dev/null
+++ b/chrome/renderer/media/ipc_video_decoder.cc
@@ -0,0 +1,344 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved. Use of this
+// source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+
+#include "chrome/renderer/media/ipc_video_decoder.h"
+
+#include "base/task.h"
+#include "media/base/callback.h"
+#include "media/base/filters.h"
+#include "media/base/filter_host.h"
+#include "media/base/limits.h"
+#include "media/base/media_format.h"
+#include "media/base/video_frame.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/ffmpeg/ffmpeg_util.h"
+#include "media/filters/ffmpeg_interfaces.h"
+
+namespace media {
+
+// |message_loop| is the render thread's loop; every filter entry point and
+// decoder callback is trampolined onto it (see the PostTask guards below).
+IpcVideoDecoder::IpcVideoDecoder(MessageLoop* message_loop)
+    : width_(0),
+      height_(0),
+      state_(kUnInitialized),
+      pending_reads_(0),
+      pending_requests_(0),
+      renderer_thread_message_loop_(message_loop) {
+}
+
+// All owned resources are scoped members; nothing to release explicitly.
+IpcVideoDecoder::~IpcVideoDecoder() {
+}
+
+// Pipeline entry point: hops to the render thread, reads the stream's
+// dimensions from its AVStream, then creates and initializes the hardware
+// decoder proxy.  Any failure is funneled through OnInitializeDone(false,..)
+// so |callback| always runs and the pipeline sees PIPELINE_ERROR_DECODE.
+void IpcVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
+                                 FilterCallback* callback) {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::Initialize,
+                          demuxer_stream,
+                          callback));
+    return;
+  }
+
+  CHECK(!demuxer_stream_);
+  demuxer_stream_ = demuxer_stream;
+  initialize_callback_.reset(callback);
+
+  // Get the AVStream by querying for the provider interface.
+  AVStreamProvider* av_stream_provider;
+  if (!demuxer_stream->QueryInterface(&av_stream_provider)) {
+    GpuVideoDecoderInitDoneParam done_param;
+    OnInitializeDone(false, done_param);
+    return;
+  }
+
+  AVStream* av_stream = av_stream_provider->GetAVStream();
+  width_ = av_stream->codec->width;
+  height_ = av_stream->codec->height;
+
+  // Create hardware decoder instance.
+  GpuVideoServiceHost* gpu_video_service_host = GpuVideoServiceHost::get();
+  gpu_video_decoder_host_ = gpu_video_service_host->CreateVideoDecoder(this);
+  if (!gpu_video_decoder_host_.get()) {
+    // CreateVideoDecoder() returns NULL when the GPU channel or the video
+    // service is unavailable; fail initialization instead of dereferencing
+    // NULL below.
+    GpuVideoDecoderInitDoneParam done_param;
+    OnInitializeDone(false, done_param);
+    return;
+  }
+
+  // Initialize hardware decoder.  Note: |done_param| is distinct from
+  // |param| -- the old code shadowed one with the other.
+  GpuVideoDecoderInitParam param;
+  param.width_ = width_;
+  param.height_ = height_;
+  if (!gpu_video_decoder_host_->Initialize(param)) {
+    GpuVideoDecoderInitDoneParam done_param;
+    OnInitializeDone(false, done_param);
+  }
+}
+
+// Completion callback for Initialize(); runs |initialize_callback_| exactly
+// once (via AutoCallbackRunner).  On success, publishes the media format and
+// enters kPlaying; on failure, reports PIPELINE_ERROR_DECODE to the host.
+void IpcVideoDecoder::OnInitializeDone(
+    bool success, const GpuVideoDecoderInitDoneParam& param) {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::OnInitializeDone,
+                          success,
+                          param));
+    return;
+  }
+
+  AutoCallbackRunner done_runner(initialize_callback_.release());
+
+  if (success) {
+    media_format_.SetAsString(MediaFormat::kMimeType,
+                              mime_type::kUncompressedVideo);
+    media_format_.SetAsInteger(MediaFormat::kWidth, width_);
+    media_format_.SetAsInteger(MediaFormat::kHeight, height_);
+    media_format_.SetAsInteger(MediaFormat::kSurfaceType,
+                               static_cast<int>(param.surface_type_));
+    media_format_.SetAsInteger(MediaFormat::kSurfaceFormat,
+                               static_cast<int>(param.format_));
+    state_ = kPlaying;
+  } else {
+    LOG(ERROR) << "IpcVideoDecoder initialization failed!";
+    host()->SetError(PIPELINE_ERROR_DECODE);
+  }
+}
+
+// Pipeline stop: asks the GPU-side decoder to uninitialize; |callback| is
+// deferred until OnUninitializeDone().  If the uninitialize request cannot
+// even be sent, completes synchronously.
+// NOTE(review): assumes |gpu_video_decoder_host_| is non-NULL; if
+// Initialize() failed before creating it this would crash -- confirm the
+// pipeline never calls Stop() in that state.
+void IpcVideoDecoder::Stop(FilterCallback* callback) {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::Stop,
+                          callback));
+    return;
+  }
+
+  stop_callback_.reset(callback);
+  if (!gpu_video_decoder_host_->Uninitialize()) {
+    LOG(ERROR) << "gpu video decoder destroy failed";
+    IpcVideoDecoder::OnUninitializeDone();
+  }
+}
+
+// Completion callback for Stop(); runs |stop_callback_| exactly once and
+// transitions to kStopped.
+void IpcVideoDecoder::OnUninitializeDone() {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::OnUninitializeDone));
+    return;
+  }
+
+  AutoCallbackRunner done_runner(stop_callback_.release());
+
+  state_ = kStopped;
+}
+
+// Pause is currently implemented as a flush; see the TODO.
+void IpcVideoDecoder::Pause(FilterCallback* callback) {
+  Flush(callback);  // TODO(jiesun): move this to flush().
+}
+
+// Pipeline flush: enters kFlushing and forwards the flush to the GPU-side
+// decoder.  |callback| is held in |flush_callback_| and fired once all
+// pending reads and fill requests have drained (see OnFlushDone /
+// ReadCompleteTask / OnFillBufferDone).
+void IpcVideoDecoder::Flush(FilterCallback* callback) {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::Flush,
+                          callback));
+    return;
+  }
+
+  state_ = kFlushing;
+
+  flush_callback_.reset(callback);
+
+  if (!gpu_video_decoder_host_->Flush()) {
+    LOG(ERROR) << "gpu video decoder flush failed";
+    OnFlushDone();
+  }
+}
+
+// Completion callback for Flush().  Only fires |flush_callback_| when no
+// demuxer reads or renderer fill requests remain in flight; otherwise the
+// last ReadCompleteTask()/OnFillBufferDone() completes the flush.
+void IpcVideoDecoder::OnFlushDone() {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::OnFlushDone));
+    return;
+  }
+
+  if (pending_reads_ == 0 && pending_requests_ == 0 && flush_callback_.get()) {
+    flush_callback_->Run();
+    flush_callback_.reset();
+  }
+}
+
+// Pipeline seek.  The hardware decoder keeps no seek state here, so this
+// simply completes on the render thread and re-primes the demuxer reads.
+// |time| is currently unused.
+void IpcVideoDecoder::Seek(base::TimeDelta time, FilterCallback* callback) {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::Seek,
+                          time,
+                          callback));
+    return;
+  }
+
+  OnSeekComplete(callback);
+}
+
+// Completes a seek: runs |callback|, returns to kPlaying and primes the
+// decoder by issuing a batch of demuxer reads.
+void IpcVideoDecoder::OnSeekComplete(FilterCallback* callback) {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::OnSeekComplete,
+                          callback));
+    return;
+  }
+
+  AutoCallbackRunner done_runner(callback);
+
+  state_ = kPlaying;
+
+  // Number of demuxer reads issued up-front to keep the decoder fed.
+  // TODO(jiesun): query the decoder for its preferred input depth instead
+  // of hard-coding.
+  const int kInitialReadCount = 20;
+  for (int i = 0; i < kInitialReadCount; ++i) {
+    demuxer_stream_->Read(
+        NewCallback(this,
+                    &IpcVideoDecoder::OnReadComplete));
+    ++pending_reads_;
+  }
+}
+
+// Demuxer read callback; takes a reference to |buffer| and defers the real
+// work to ReadCompleteTask() on the render thread.
+void IpcVideoDecoder::OnReadComplete(Buffer* buffer) {
+  scoped_refptr<Buffer> buffer_ref = buffer;
+  ReadCompleteTask(buffer_ref);
+}
+
+// Handles one completed demuxer read on the render thread.  Depending on
+// |state_| the buffer is discarded (stopped/ended), counted toward flush
+// completion (flushing), or forwarded to the GPU decoder.  An end-of-stream
+// buffer while playing moves the state machine to kFlushCodec.
+void IpcVideoDecoder::ReadCompleteTask(scoped_refptr<Buffer> buffer) {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::ReadCompleteTask,
+                          buffer));
+    return;
+  }
+
+  DCHECK_GT(pending_reads_, 0u);
+  --pending_reads_;
+
+  if (state_ == kStopped || state_ == kEnded) {
+    // Just discard the input buffers
+    return;
+  }
+
+  if (state_ == kFlushing) {
+    // The flush is complete once the last in-flight read and fill request
+    // have drained; resume playing afterwards.
+    if (pending_reads_ == 0 && pending_requests_ == 0) {
+      CHECK(flush_callback_.get());
+      flush_callback_->Run();
+      flush_callback_.reset();
+      state_ = kPlaying;
+    }
+    return;
+  }
+  // Transition to kFlushCodec on the first end of input stream buffer.
+  if (state_ == kPlaying && buffer->IsEndOfStream()) {
+    state_ = kFlushCodec;
+  }
+
+  gpu_video_decoder_host_->EmptyThisBuffer(buffer);
+}
+
+// Renderer hands us an output frame to fill; forward it to the GPU-side
+// decoder.  |pending_requests_| tracks frames outstanding with the decoder
+// so Flush() knows when everything has drained.
+void IpcVideoDecoder::FillThisBuffer(scoped_refptr<VideoFrame> video_frame) {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::FillThisBuffer,
+                          video_frame));
+    return;
+  }
+
+  // Synchronized flushing before stop should prevent this.
+  CHECK_NE(state_, kStopped);
+
+  // Notify decode engine the available of new frame.
+  ++pending_requests_;
+  gpu_video_decoder_host_->FillThisBuffer(video_frame);
+}
+
+void IpcVideoDecoder::OnFillBufferDone(scoped_refptr<VideoFrame> video_frame) {
+ if (MessageLoop::current() != renderer_thread_message_loop_) {
+ renderer_thread_message_loop_->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(this,
+ &IpcVideoDecoder::OnFillBufferDone,
+ video_frame));
+ return;
+ }
+
+ if (video_frame.get()) {
+ --pending_requests_;
+ fill_buffer_done_callback()->Run(video_frame);
+ if (state_ == kFlushing && pending_reads_ == 0 && pending_requests_ == 0) {
+ CHECK(flush_callback_.get());
+ flush_callback_->Run();
+ flush_callback_.reset();
+ state_ = kPlaying;
+ }
+
+ } else {
+ if (state_ == kFlushCodec) {
+ // When in kFlushCodec, any errored decode, or a 0-lengthed frame,
+ // is taken as a signal to stop decoding.
+ state_ = kEnded;
+ scoped_refptr<VideoFrame> video_frame;
+ VideoFrame::CreateEmptyFrame(&video_frame);
+ fill_buffer_done_callback()->Run(video_frame);
+ }
+ }
+}
+
+// Decoder consumed an input buffer; request the next compressed buffer from
+// the demuxer to keep the pipeline fed.  |buffer| itself is not recycled.
+void IpcVideoDecoder::OnEmptyBufferDone(scoped_refptr<Buffer> buffer) {
+  if (MessageLoop::current() != renderer_thread_message_loop_) {
+    renderer_thread_message_loop_->PostTask(
+        FROM_HERE,
+        NewRunnableMethod(this,
+                          &IpcVideoDecoder::OnEmptyBufferDone,
+                          buffer));
+    return;
+  }
+
+  // TODO(jiesun): We haven't recycle input buffer yet.
+  demuxer_stream_->Read(NewCallback(this, &IpcVideoDecoder::OnReadComplete));
+  ++pending_reads_;
+}
+
+// Hardware decoder reported an unrecoverable error; surface it to the
+// pipeline as a decode error.
+void IpcVideoDecoder::OnDeviceError() {
+  host()->SetError(PIPELINE_ERROR_DECODE);
+}
+
+// The renderer supplies output frames via FillThisBuffer(); this decoder
+// does not allocate its own.
+bool IpcVideoDecoder::ProvidesBuffer() {
+  return true;
+}
+
+// static
+// Creates the pipeline filter factory; |message_loop| must be the render
+// thread's loop and must outlive the decoders the factory creates.
+FilterFactory* IpcVideoDecoder::CreateFactory(MessageLoop* message_loop) {
+  return new FilterFactoryImpl1<IpcVideoDecoder, MessageLoop*>(message_loop);
+}
+
+// static
+// Returns true only for FFmpeg-demuxed H.264 streams.
+bool IpcVideoDecoder::IsMediaFormatSupported(const MediaFormat& format) {
+  std::string mime_type;
+  // Must be || not &&: reject when the mime type is missing OR when it is
+  // present but not FFmpeg video.  The old && accepted any format whose
+  // mime type string was merely readable.
+  if (!format.GetAsString(MediaFormat::kMimeType, &mime_type) ||
+      mime_type::kFFmpegVideo != mime_type)
+    return false;
+
+  // TODO(jiesun): Although we current only support H264 hardware decoding,
+  // in the future, we should query GpuVideoService for capabilities.
+  int codec_id;
+  return format.GetAsInteger(MediaFormat::kFFmpegCodecID, &codec_id) &&
+         codec_id == CODEC_ID_H264;
+}
+
+} // namespace media
+
diff --git a/chrome/renderer/media/ipc_video_decoder.h b/chrome/renderer/media/ipc_video_decoder.h
new file mode 100644
index 0000000..40a8098
--- /dev/null
+++ b/chrome/renderer/media/ipc_video_decoder.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
+#define CHROME_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
+
+#include "base/time.h"
+#include "chrome/renderer/gpu_video_service_host.h"
+#include "media/base/pts_heap.h"
+#include "media/base/video_frame.h"
+#include "media/filters/decoder_base.h"
+
+struct AVRational;
+
+namespace media {
+
+class VideoDecodeEngine;
+
+// Media-pipeline video decoder filter that delegates actual decoding to a
+// hardware decoder in the GPU process via GpuVideoDecoderHost.  All filter
+// entry points trampoline themselves onto the render thread's message loop.
+class IpcVideoDecoder : public VideoDecoder,
+                        public GpuVideoDecoderHost::EventHandler {
+ public:
+  explicit IpcVideoDecoder(MessageLoop* message_loop);
+  virtual ~IpcVideoDecoder();
+
+  static FilterFactory* CreateFactory(MessageLoop* message_loop);
+  static bool IsMediaFormatSupported(const MediaFormat& media_format);
+
+  // MediaFilter implementation.
+  virtual void Stop(FilterCallback* callback);
+  virtual void Seek(base::TimeDelta time, FilterCallback* callback);
+  virtual void Pause(FilterCallback* callback);
+  virtual void Flush(FilterCallback* callback);
+
+  // Decoder implementation.
+  virtual void Initialize(DemuxerStream* demuxer_stream,
+                          FilterCallback* callback);
+  virtual const MediaFormat& media_format() { return media_format_; }
+  virtual void FillThisBuffer(scoped_refptr<VideoFrame> video_frame);
+
+  // GpuVideoDecoderHost::EventHandler.
+  virtual void OnInitializeDone(bool success,
+                                const GpuVideoDecoderInitDoneParam& param);
+  virtual void OnUninitializeDone();
+  virtual void OnFlushDone();
+  virtual void OnEmptyBufferDone(scoped_refptr<Buffer> buffer);
+  virtual void OnFillBufferDone(scoped_refptr<VideoFrame> frame);
+  virtual void OnDeviceError();
+
+  // True: the renderer provides output frames (see FillThisBuffer).
+  virtual bool ProvidesBuffer();
+
+ private:
+  void OnSeekComplete(FilterCallback* callback);
+  void OnReadComplete(Buffer* buffer);
+  void ReadCompleteTask(scoped_refptr<Buffer> buffer);
+
+ private:
+  // NOTE(review): CreateFactory() actually uses FilterFactoryImpl1, so this
+  // FilterFactoryImpl2 friend declaration appears stale -- confirm and drop.
+  friend class FilterFactoryImpl2<IpcVideoDecoder,
+                                  VideoDecodeEngine*,
+                                  MessageLoop*>;
+
+ private:
+  // Video dimensions read from the demuxed AVStream during Initialize().
+  int32 width_;
+  int32 height_;
+  MediaFormat media_format_;
+
+  // Deferred pipeline callbacks; each is run exactly once by the matching
+  // On*Done() handler.
+  scoped_ptr<FilterCallback> flush_callback_;
+  scoped_ptr<FilterCallback> initialize_callback_;
+  scoped_ptr<FilterCallback> stop_callback_;
+
+  enum DecoderState {
+    kUnInitialized,
+    kPlaying,
+    kFlushing,
+    kPausing,
+    kFlushCodec,
+    kEnded,
+    kStopped,
+  };
+  DecoderState state_;
+
+  // Tracks the number of asynchronous reads issued to |demuxer_stream_|.
+  // Using size_t since it is always compared against deque::size().
+  size_t pending_reads_;
+  // Tracks the number of asynchronous reads issued from renderer.
+  size_t pending_requests_;
+
+  // Pointer to the demuxer stream that will feed us compressed buffers.
+  scoped_refptr<DemuxerStream> demuxer_stream_;
+
+  // Render thread message loop; not owned.
+  MessageLoop* renderer_thread_message_loop_;
+  // Proxy for the decoder in the GPU process; NULL if creation failed.
+  scoped_refptr<GpuVideoDecoderHost> gpu_video_decoder_host_;
+
+  DISALLOW_COPY_AND_ASSIGN(IpcVideoDecoder);
+};
+
+} // namespace media
+
+#endif // CHROME_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
+
diff --git a/chrome/renderer/render_thread.cc b/chrome/renderer/render_thread.cc
index 8ea6c08..7ab9ae8 100644
--- a/chrome/renderer/render_thread.cc
+++ b/chrome/renderer/render_thread.cc
@@ -49,6 +49,7 @@
#include "chrome/renderer/extensions/renderer_extension_bindings.h"
#include "chrome/renderer/external_extension.h"
#include "chrome/renderer/gpu_channel_host.h"
+#include "chrome/renderer/gpu_video_service_host.h"
#include "chrome/renderer/indexed_db_dispatcher.h"
#include "chrome/renderer/loadtimes_extension_bindings.h"
#include "chrome/renderer/net/renderer_net_predictor.h"
@@ -273,6 +274,8 @@ void RenderThread::Init() {
switches::kPrelaunchGpuProcess)) {
EstablishGpuChannel();
}
+
+ GpuVideoServiceHost::get()->OnRendererThreadInit(MessageLoop::current());
}
RenderThread::~RenderThread() {
diff --git a/chrome/renderer/render_view.cc b/chrome/renderer/render_view.cc
index bc400ef..c314f1c 100644
--- a/chrome/renderer/render_view.cc
+++ b/chrome/renderer/render_view.cc
@@ -52,6 +52,7 @@
#include "chrome/renderer/geolocation_dispatcher.h"
#include "chrome/renderer/localized_error.h"
#include "chrome/renderer/media/audio_renderer_impl.h"
+#include "chrome/renderer/media/ipc_video_decoder.h"
#include "chrome/renderer/media/ipc_video_renderer.h"
#include "chrome/renderer/navigation_state.h"
#include "chrome/renderer/notification_provider.h"
@@ -77,6 +78,7 @@
#include "gfx/rect.h"
#include "grit/generated_resources.h"
#include "grit/renderer_resources.h"
+#include "media/base/media_switches.h"
#include "net/base/data_url.h"
#include "net/base/escape.h"
#include "net/base/net_errors.h"
@@ -2325,6 +2327,13 @@ WebMediaPlayer* RenderView::createMediaPlayer(
AudioRendererImpl::CreateFactory(audio_message_filter()));
}
+ if (cmd_line->HasSwitch(switches::kEnableAcceleratedDecoding) &&
+ cmd_line->HasSwitch(switches::kEnableAcceleratedCompositing)) {
+ // Add the hardware video decoder factory.
+ factory->AddFactory(
+ media::IpcVideoDecoder::CreateFactory(MessageLoop::current()));
+ }
+
WebApplicationCacheHostImpl* appcache_host =
WebApplicationCacheHostImpl::FromFrame(frame);
diff --git a/ipc/ipc_message_utils.h b/ipc/ipc_message_utils.h
index 7b1e498..1486a42 100644
--- a/ipc/ipc_message_utils.h
+++ b/ipc/ipc_message_utils.h
@@ -58,6 +58,8 @@ enum IPCMessageStart {
GpuMsgStart,
GpuHostMsgStart,
GpuChannelMsgStart,
+ GpuVideoDecoderHostMsgStart,
+ GpuVideoDecoderMsgStart,
ServiceMsgStart,
ServiceHostMsgStart,
// NOTE: When you add a new message class, also update
diff --git a/media/base/media_switches.cc b/media/base/media_switches.cc
index 3b684ce..e2612c25 100644
--- a/media/base/media_switches.cc
+++ b/media/base/media_switches.cc
@@ -11,6 +11,9 @@ namespace switches {
const char kAlsaDevice[] = "alsa-device";
#endif
+// Enable hardware decoding through gpu process.
+const char kEnableAcceleratedDecoding[] = "enable-accelerated-decoding";
+
// Enable hardware decoding using OpenMax API.
// In practice this is for ChromeOS ARM.
const char kEnableOpenMax[] = "enable-openmax";
diff --git a/media/base/media_switches.h b/media/base/media_switches.h
index 6e0e0d3..6dd553a 100644
--- a/media/base/media_switches.h
+++ b/media/base/media_switches.h
@@ -15,6 +15,7 @@ namespace switches {
extern const char kAlsaDevice[];
#endif
+extern const char kEnableAcceleratedDecoding[];
extern const char kEnableOpenMax[];
extern const char kVideoThreads[];
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
index 5c2a913..7a74d13 100644
--- a/media/filters/ffmpeg_demuxer.cc
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -426,8 +426,8 @@ void FFmpegDemuxer::InitializeTask(DataSource* data_source,
// Initialize the bitstream if OpenMAX is enabled.
// TODO(hclam): Should be enabled by the decoder.
- if (CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableOpenMax)) {
+ CommandLine* cmd = CommandLine::ForCurrentProcess();
+ if (cmd->HasSwitch(switches::kEnableAcceleratedDecoding)) {
// TODO(ajwong): Unittest this branch of the if statement.
// TODO(hclam): In addition to codec we should also check the container.
const char* filter_name = NULL;