summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--chrome/chrome.gyp4
-rw-r--r--chrome/common/gpu_messages_internal.h4
-rw-r--r--chrome/common/gpu_video_common.cc2
-rw-r--r--chrome/common/gpu_video_common.h3
-rw-r--r--chrome/gpu/gpu_video_decoder.cc28
-rw-r--r--chrome/gpu/media/mft_angle_video_device.cc52
-rw-r--r--chrome/gpu/media/mft_angle_video_device.h42
-rw-r--r--chrome/renderer/gpu_video_decoder_host.cc4
-rw-r--r--chrome/renderer/media/gles2_video_decode_context.cc6
-rw-r--r--media/media.gyp76
-rw-r--r--media/mf/README.chromium23
-rw-r--r--media/mf/file_reader_util.cc211
-rw-r--r--media/mf/file_reader_util.h70
-rw-r--r--media/mf/mft_h264_decoder.cc (renamed from media/video/mft_h264_decode_engine.cc)272
-rw-r--r--media/mf/mft_h264_decoder.h (renamed from media/video/mft_h264_decode_engine.h)73
-rw-r--r--media/mf/mft_h264_decoder_example.cc421
-rw-r--r--media/mf/test/mft_h264_decoder_unittest.cc460
-rw-r--r--media/mf/test/run_all_unittests.cc26
-rw-r--r--media/video/mft_h264_decode_engine_context.cc179
-rw-r--r--media/video/mft_h264_decode_engine_context.h70
-rw-r--r--media/video/mft_h264_decode_engine_unittest.cc410
21 files changed, 1715 insertions, 721 deletions
diff --git a/chrome/chrome.gyp b/chrome/chrome.gyp
index 3ccc202..4e2a038 100644
--- a/chrome/chrome.gyp
+++ b/chrome/chrome.gyp
@@ -787,10 +787,6 @@
],
},
],
- 'sources': [
- 'gpu/media/mft_angle_video_device.cc',
- 'gpu/media/mft_angle_video_device.h',
- ],
}],
['OS=="linux" and target_arch!="arm"', {
'sources': [
diff --git a/chrome/common/gpu_messages_internal.h b/chrome/common/gpu_messages_internal.h
index b95a534..f61be25b1 100644
--- a/chrome/common/gpu_messages_internal.h
+++ b/chrome/common/gpu_messages_internal.h
@@ -348,8 +348,8 @@ IPC_BEGIN_MESSAGES(GpuVideoDecoderHost)
// GpuVideoDecoder reports that a video frame is ready to be consumed.
IPC_MESSAGE_ROUTED4(GpuVideoDecoderHostMsg_ConsumeVideoFrame,
int32, /* Video Frame ID */
- int64, /* Timestamp in microseconds */
- int64, /* Duration in microseconds */
+ int64, /* Timestamp in ms */
+ int64, /* Duration in ms */
int32) /* Flags */
// Allocate video frames for output of the hardware video decoder.
diff --git a/chrome/common/gpu_video_common.cc b/chrome/common/gpu_video_common.cc
index 67466b9..e0279de 100644
--- a/chrome/common/gpu_video_common.cc
+++ b/chrome/common/gpu_video_common.cc
@@ -4,8 +4,6 @@
#include "chrome/common/gpu_video_common.h"
-const int32 kGpuVideoInvalidFrameId = -1;
-
namespace IPC {
///////////////////////////////////////////////////////////////////////////////
diff --git a/chrome/common/gpu_video_common.h b/chrome/common/gpu_video_common.h
index d5a2ced..17d5498 100644
--- a/chrome/common/gpu_video_common.h
+++ b/chrome/common/gpu_video_common.h
@@ -10,9 +10,6 @@
#include "chrome/common/common_param_traits.h"
#include "media/base/video_frame.h"
-// This is used in messages when only buffer flag is meaningful.
-extern const int32 kGpuVideoInvalidFrameId;
-
// Flags assigned to a video buffer for both input and output.
enum GpuVideoBufferFlag {
kGpuVideoEndOfStream = 1 << 0,
diff --git a/chrome/gpu/gpu_video_decoder.cc b/chrome/gpu/gpu_video_decoder.cc
index 0cd9d14..86bcd04 100644
--- a/chrome/gpu/gpu_video_decoder.cc
+++ b/chrome/gpu/gpu_video_decoder.cc
@@ -4,9 +4,7 @@
#include "chrome/gpu/gpu_video_decoder.h"
-#include "base/command_line.h"
#include "chrome/common/child_thread.h"
-#include "chrome/common/chrome_switches.h"
#include "chrome/common/gpu_messages.h"
#include "chrome/gpu/gpu_channel.h"
#include "chrome/gpu/media/fake_gl_video_decode_engine.h"
@@ -14,12 +12,6 @@
#include "media/base/data_buffer.h"
#include "media/base/video_frame.h"
-#if defined(OS_WIN)
-#include "chrome/gpu/media/mft_angle_video_device.h"
-#include "media/video/mft_h264_decode_engine.h"
-#include <d3d9.h>
-#endif
-
void GpuVideoDecoder::OnChannelConnected(int32 peer_pid) {
}
@@ -113,12 +105,7 @@ void GpuVideoDecoder::ProduceVideoSample(scoped_refptr<Buffer> buffer) {
}
void GpuVideoDecoder::ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
- if (frame->IsEndOfStream()) {
- SendConsumeVideoFrame(kGpuVideoInvalidFrameId, 0, 0, kGpuVideoEndOfStream);
- return;
- }
-
- int32 frame_id = kGpuVideoInvalidFrameId;
+ int32 frame_id = -1;
for (VideoFrameMap::iterator i = video_frame_map_.begin();
i != video_frame_map_.end(); ++i) {
if (i->second == frame) {
@@ -129,7 +116,8 @@ void GpuVideoDecoder::ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
DCHECK_NE(-1, frame_id) << "VideoFrame not recognized";
SendConsumeVideoFrame(frame_id, frame->GetTimestamp().InMicroseconds(),
- frame->GetDuration().InMicroseconds(), 0);
+ frame->GetDuration().InMicroseconds(),
+ frame->IsEndOfStream() ? kGpuVideoEndOfStream : 0);
}
void* GpuVideoDecoder::GetDevice() {
@@ -237,16 +225,8 @@ GpuVideoDecoder::GpuVideoDecoder(
// TODO(jiesun): find a better way to determine which VideoDecodeEngine
// to return on current platform.
-#if defined(OS_WIN)
- const CommandLine& command_line = *CommandLine::ForCurrentProcess();
- if (command_line.HasSwitch(switches::kEnableAcceleratedDecoding)) {
- decode_engine_.reset(new media::MftH264DecodeEngine(true));
- video_device_.reset(new MftAngleVideoDevice());
- }
-#else
decode_engine_.reset(new FakeGlVideoDecodeEngine());
video_device_.reset(new FakeGlVideoDevice());
-#endif
}
void GpuVideoDecoder::OnInitialize(const GpuVideoDecoderInitParam& param) {
@@ -255,7 +235,7 @@ void GpuVideoDecoder::OnInitialize(const GpuVideoDecoderInitParam& param) {
config_.width = param.width;
config_.height = param.height;
config_.opaque_context = NULL;
- decode_engine_->Initialize(message_loop_, this, this, config_);
+ decode_engine_->Initialize(NULL, this, this, config_);
}
void GpuVideoDecoder::OnUninitialize() {
diff --git a/chrome/gpu/media/mft_angle_video_device.cc b/chrome/gpu/media/mft_angle_video_device.cc
deleted file mode 100644
index 1faf9dc..0000000
--- a/chrome/gpu/media/mft_angle_video_device.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/gpu/media/mft_angle_video_device.h"
-
-#include <d3d9.h>
-
-#include "media/base/video_frame.h"
-#include "third_party/angle/src/libGLESv2/main.h"
-
-MftAngleVideoDevice::MftAngleVideoDevice()
- : device_(reinterpret_cast<egl::Display*>(
- eglGetCurrentDisplay())->getDevice()) {
-}
-
-void* MftAngleVideoDevice::GetDevice() {
- return device_;
-}
-
-bool MftAngleVideoDevice::CreateVideoFrameFromGlTextures(
- size_t width, size_t height, media::VideoFrame::Format format,
- const std::vector<media::VideoFrame::GlTexture>& textures,
- scoped_refptr<media::VideoFrame>* frame) {
- media::VideoFrame::GlTexture texture_array[media::VideoFrame::kMaxPlanes];
- memset(texture_array, 0, sizeof(texture_array));
-
- for (size_t i = 0; i < textures.size(); ++i) {
- texture_array[i] = textures[i];
- }
-
- media::VideoFrame::CreateFrameGlTexture(format,
- width,
- height,
- texture_array,
- frame);
- return *frame != NULL;
-}
-
-void MftAngleVideoDevice::ReleaseVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame) {
- // We didn't need to anything here because we didn't allocate any resources
- // for the VideoFrame(s) generated.
-}
-
-bool MftAngleVideoDevice::UploadToVideoFrame(
- void* buffer, scoped_refptr<media::VideoFrame> frame) {
- gl::Context* context = (gl::Context*)eglGetCurrentContext();
- // TODO(hclam): Connect ANGLE to upload the surface to texture when changes
- // to ANGLE is done.
- return true;
-}
diff --git a/chrome/gpu/media/mft_angle_video_device.h b/chrome/gpu/media/mft_angle_video_device.h
deleted file mode 100644
index fb1e0e8..0000000
--- a/chrome/gpu/media/mft_angle_video_device.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_GPU_MEDIA_MFT_ANGLE_VIDEO_DEVICE_H_
-#define CHROME_GPU_MEDIA_MFT_ANGLE_VIDEO_DEVICE_H_
-
-#include "base/scoped_comptr_win.h"
-#include "chrome/gpu/media/gpu_video_device.h"
-
-struct IDirect3DDevice9;
-extern "C" const GUID IID_IDirect3DDevice9;
-
-namespace media {
-class VideoFrame;
-} // namespace media
-
-// This class is used to provide hardware video device, video frames and
-// allow video frames to be uploaded to their final render target.
-//
-// This specifically serves MftH264DecodeEngine in the context of ANGLE.
-class MftAngleVideoDevice : public GpuVideoDevice {
- public:
- MftAngleVideoDevice();
- virtual ~MftAngleVideoDevice() {}
-
- // GpuVideoDevice implementation.
- virtual void* GetDevice();
- virtual bool CreateVideoFrameFromGlTextures(
- size_t width, size_t height, media::VideoFrame::Format format,
- const std::vector<media::VideoFrame::GlTexture>& textures,
- scoped_refptr<media::VideoFrame>* frame);
- virtual void ReleaseVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame);
- virtual bool UploadToVideoFrame(void* buffer,
- scoped_refptr<media::VideoFrame> frame);
-
- private:
- ScopedComPtr<IDirect3DDevice9, &IID_IDirect3DDevice9> device_;
-};
-
-#endif // CHROME_GPU_MEDIA_MFT_ANGLE_VIDEO_DEVICE_H_
diff --git a/chrome/renderer/gpu_video_decoder_host.cc b/chrome/renderer/gpu_video_decoder_host.cc
index bf6c79f..ad81004 100644
--- a/chrome/renderer/gpu_video_decoder_host.cc
+++ b/chrome/renderer/gpu_video_decoder_host.cc
@@ -272,8 +272,8 @@ void GpuVideoDecoderHost::OnConsumeVideoFrame(int32 frame_id, int64 timestamp,
frame = video_frame_map_[frame_id];
DCHECK(frame) << "Invalid frame ID received";
- frame->SetDuration(base::TimeDelta::FromMicroseconds(duration));
- frame->SetTimestamp(base::TimeDelta::FromMicroseconds(timestamp));
+ frame->SetDuration(base::TimeDelta::FromMilliseconds(duration));
+ frame->SetTimestamp(base::TimeDelta::FromMilliseconds(timestamp));
}
event_handler_->ConsumeVideoFrame(frame);
diff --git a/chrome/renderer/media/gles2_video_decode_context.cc b/chrome/renderer/media/gles2_video_decode_context.cc
index e32e22b..bd60ea1 100644
--- a/chrome/renderer/media/gles2_video_decode_context.cc
+++ b/chrome/renderer/media/gles2_video_decode_context.cc
@@ -56,10 +56,8 @@ void Gles2VideoDecodeContext::AllocateVideoFrames(
glGenTextures(planes, textures);
for (int j = 0; j < planes; ++j) {
glBindTexture(GL_TEXTURE_2D, textures[j]);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, gl_format, width, height, 0, gl_format,
GL_UNSIGNED_BYTE, NULL);
}
diff --git a/media/media.gyp b/media/media.gyp
index 49679b1..ee8acbc 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -150,12 +150,6 @@
],
},
'conditions': [
- ['OS=="win"', {
- 'sources': [
- 'video/mft_h264_decode_engine.cc',
- 'video/mft_h264_decode_engine.h',
- ],
- }],
['OS=="linux" or OS=="freebsd"', {
'link_settings': {
'libraries': [
@@ -452,6 +446,76 @@
},
},
},
+ {
+ 'target_name': 'mft_h264_decoder',
+ 'type': '<(library)',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ '../base/base.gyp:test_support_base',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'mf/mft_h264_decoder.cc',
+ 'mf/mft_h264_decoder.h',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '1', # Set /SUBSYSTEM:CONSOLE
+ },
+ },
+ },
+ {
+ 'target_name': 'mft_h264_decoder_example',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'mft_h264_decoder',
+ '../base/base.gyp:base',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'mf/file_reader_util.cc',
+ 'mf/file_reader_util.h',
+ 'mf/mft_h264_decoder_example.cc',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '1', # Set /SUBSYSTEM:CONSOLE
+ },
+ },
+ },
+ {
+ 'target_name': 'mft_h264_decoder_unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'mft_h264_decoder',
+ '../base/base.gyp:base',
+ '../base/base.gyp:base_i18n',
+ '../testing/gtest.gyp:gtest',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'mf/file_reader_util.cc',
+ 'mf/file_reader_util.h',
+ 'mf/test/mft_h264_decoder_unittest.cc',
+ 'mf/test/run_all_unittests.cc',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '1', # Set /SUBSYSTEM:CONSOLE
+ },
+ },
+ },
],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
diff --git a/media/mf/README.chromium b/media/mf/README.chromium
new file mode 100644
index 0000000..73f1deb
--- /dev/null
+++ b/media/mf/README.chromium
@@ -0,0 +1,23 @@
+This tool demonstrates the use of the Media Foundation H.264 decoder as a
+standalone Media Foundation Transform (MFT). The H.264 decoder takes sample
+objects (IMFSample) containing Annex B streams as input, and outputs decoded
+YV12/NV12 video frames as output, contained in a buffer object (if DXVA is not
+enabled) or a Direct3D surface (if DXVA is enabled.)
+
+This tool uses ffmpeg's parser and bitstream converter to read a file
+containing H.264 video and outputs packets containing Annex B streams which are
+then fed into the H.264 decoder. This tool also demonstrates the use of the
+H.264 decoder using callbacks.
+
+Requirements: Windows 7
+
+Note1: On some video files, there is a mysterious off-by-one decoded frame count
+when DXVA is enabled.
+
+Note2: This tool requires the ffmpeg library to have the H.264 codec and Annex
+B bitstream filter. You might need to build your own, or grab one from
+http://ffmpeg.arrozcru.org/autobuilds/
+
+Note3: A single H264Mft instance is for a single H.264 video stream only.
+Inputting streams consisting of more than 1 video to a single instance
+may result in undefined behavior.
diff --git a/media/mf/file_reader_util.cc b/media/mf/file_reader_util.cc
new file mode 100644
index 0000000..d18afe6
--- /dev/null
+++ b/media/mf/file_reader_util.cc
@@ -0,0 +1,211 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Borrowed from media/tools/omx_test/file_reader_util.cc.
+// Added some functionalities related to timestamps on packets.
+
+#include "media/mf/file_reader_util.h"
+
+#include <cstring>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "media/base/data_buffer.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/filters/bitstream_converter.h"
+
+namespace media {
+
+//////////////////////////////////////////////////////////////////////////////
+// FFmpegFileReader
+FFmpegFileReader::FFmpegFileReader(const std::string& filename)
+ : filename_(filename),
+ format_context_(NULL),
+ codec_context_(NULL),
+ target_stream_(-1),
+ converter_(NULL),
+ last_timestamp_(0) {
+}
+
+FFmpegFileReader::~FFmpegFileReader() {
+ if (format_context_)
+ av_close_input_file(format_context_);
+}
+
+bool FFmpegFileReader::Initialize() {
+ int result = av_open_input_file(&format_context_, filename_.c_str(),
+ NULL, 0, NULL);
+ if (result < 0) {
+ switch (result) {
+ case AVERROR_NOFMT:
+ LOG(ERROR) << "Error: File format not supported "
+ << filename_;
+ break;
+ default:
+ LOG(ERROR) << "Error: Could not open input for "
+ << filename_ << ": " << result;
+ break;
+ }
+ return false;
+ }
+ if (av_find_stream_info(format_context_) < 0) {
+ LOG(ERROR) << "can't use FFmpeg to parse stream info";
+ return false;
+ }
+
+ for (size_t i = 0; i < format_context_->nb_streams; ++i) {
+ codec_context_ = format_context_->streams[i]->codec;
+
+ // Find the video stream.
+ if (codec_context_->codec_type == CODEC_TYPE_VIDEO) {
+ target_stream_ = i;
+ break;
+ }
+ }
+ if (target_stream_ == -1) {
+ LOG(ERROR) << "no video in the stream";
+ return false;
+ }
+
+ // Initialize the bitstream filter if needed.
+ // TODO(hclam): find a better way to identify mp4 container.
+ if (codec_context_->codec_id == CODEC_ID_H264) {
+ converter_.reset(new media::FFmpegBitstreamConverter(
+ "h264_mp4toannexb", codec_context_));
+ } else if (codec_context_->codec_id == CODEC_ID_MPEG4) {
+ converter_.reset(new media::FFmpegBitstreamConverter(
+ "mpeg4video_es", codec_context_));
+ } else if (codec_context_->codec_id == CODEC_ID_WMV3) {
+ converter_.reset(new media::FFmpegBitstreamConverter(
+ "vc1_asftorcv", codec_context_));
+ } else if (codec_context_->codec_id == CODEC_ID_VC1) {
+ converter_.reset(new media::FFmpegBitstreamConverter(
+ "vc1_asftoannexg", codec_context_));
+ }
+ if (converter_.get() && !converter_->Initialize()) {
+ converter_.reset();
+ LOG(ERROR) << "failed to initialize h264_mp4toannexb filter";
+ return false;
+ }
+ return true;
+}
+
+void FFmpegFileReader::Read(scoped_refptr<DataBuffer>* output) {
+ if (!format_context_ || !codec_context_ || target_stream_ == -1) {
+ *output = new DataBuffer(0);
+ return;
+ }
+ AVPacket packet;
+ bool found = false;
+ while (!found) {
+ int result = av_read_frame(format_context_, &packet);
+ if (result < 0) {
+ *output = new DataBuffer(0);
+ return;
+ }
+ if (packet.stream_index == target_stream_) {
+ if (converter_.get() && !converter_->ConvertPacket(&packet)) {
+ LOG(ERROR) << "failed to convert AVPacket";
+ }
+ last_timestamp_ = std::max(last_timestamp_, packet.pts);
+ CopyPacketToBuffer(&packet, output);
+ found = true;
+ }
+ av_free_packet(&packet);
+ }
+}
+
+bool FFmpegFileReader::SeekForward(int64 seek_amount_us) {
+ if (!format_context_ || !codec_context_ || target_stream_ == -1) {
+ return false;
+ }
+ int64 new_us = TimeBaseToMicroseconds(last_timestamp_) + seek_amount_us;
+ int64 new_timestamp = MicrosecondsToTimeBase(new_us);
+ last_timestamp_ = new_timestamp;
+ return av_seek_frame(format_context_, target_stream_, new_timestamp, 0) >= 0;
+}
+
+bool FFmpegFileReader::GetFrameRate(int* num, int* denom) const {
+ if (!codec_context_)
+ return false;
+ *denom = codec_context_->time_base.num;
+ *num = codec_context_->time_base.den;
+ if (*denom == 0) {
+ *num = 0;
+ return false;
+ }
+ return true;
+}
+
+bool FFmpegFileReader::GetWidth(int* width) const {
+ if (!codec_context_)
+ return false;
+ *width = codec_context_->width;
+ return true;
+}
+
+bool FFmpegFileReader::GetHeight(int* height) const {
+ if (!codec_context_)
+ return false;
+ *height = codec_context_->height;
+ return true;
+}
+
+bool FFmpegFileReader::GetAspectRatio(int* num, int* denom) const {
+ if (!codec_context_)
+ return false;
+ AVRational aspect_ratio = codec_context_->sample_aspect_ratio;
+ if (aspect_ratio.num == 0 || aspect_ratio.den == 0)
+ return false;
+ *num = aspect_ratio.num;
+ *denom = aspect_ratio.den;
+ return true;
+}
+
+int64 FFmpegFileReader::TimeBaseToMicroseconds(
+ int64 time_base_unit) const {
+ // FFmpeg units after time base conversion seem to actually be given in
+ // milliseconds (instead of seconds...) so we need to multiply it by a factor
+ // of 1,000.
+ // Note we need to double this because the frame rate is doubled in
+ // ffmpeg.
+ CHECK(codec_context_) << "Codec context needs to be initialized";
+ return time_base_unit * 2000 * codec_context_->time_base.num /
+ codec_context_->time_base.den;
+}
+
+int64 FFmpegFileReader::MicrosecondsToTimeBase(
+ int64 time_base_unit) const {
+ CHECK(codec_context_) << "Codec context needs to be initialized";
+ return time_base_unit * codec_context_->time_base.den / 2000 /
+ codec_context_->time_base.num;
+}
+
+void FFmpegFileReader::CopyPacketToBuffer(AVPacket* packet,
+ scoped_refptr<DataBuffer>* output) {
+ uint8* buffer = new uint8[packet->size];
+ if (buffer == NULL) {
+ LOG(ERROR) << "Failed to allocate buffer for annex b stream";
+ *output = NULL;
+ return;
+ }
+ memcpy(buffer, packet->data, packet->size);
+ *output = new DataBuffer(buffer, packet->size);
+ if (packet->pts != AV_NOPTS_VALUE) {
+ (*output)->SetTimestamp(
+ base::TimeDelta::FromMicroseconds(
+ TimeBaseToMicroseconds(packet->pts)));
+ } else {
+ (*output)->SetTimestamp(StreamSample::kInvalidTimestamp);
+ }
+ if (packet->duration == 0) {
+ LOG(WARNING) << "Packet duration not known";
+ }
+ (*output)->SetDuration(
+ base::TimeDelta::FromMicroseconds(
+ TimeBaseToMicroseconds(packet->duration)));
+}
+
+} // namespace media
diff --git a/media/mf/file_reader_util.h b/media/mf/file_reader_util.h
new file mode 100644
index 0000000..a5fa9ec
--- /dev/null
+++ b/media/mf/file_reader_util.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Borrowed from media/tools/omx_test/file_reader_util.h.
+// Added some functionalities related to timestamps on packets and Media
+// Foundation.
+
+#ifndef MEDIA_MF_FILE_READER_UTIL_H_
+#define MEDIA_MF_FILE_READER_UTIL_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/ref_counted.h"
+#include "base/scoped_ptr.h"
+
+struct AVCodecContext;
+struct AVFormatContext;
+struct AVPacket;
+
+namespace media {
+
+class BitstreamConverter;
+class DataBuffer;
+
+// A class to help reading and parsing input file for use in omx_test.
+class FileReader {
+ public:
+ virtual ~FileReader() {}
+
+ // Initialize FileReader object, returns true if successful.
+ virtual bool Initialize() = 0;
+
+ // Read the file into |output|, and output the number of bytes read to
+ // |size|.
+ virtual void Read(scoped_refptr<DataBuffer>* output) = 0;
+};
+
+class FFmpegFileReader : public FileReader {
+ public:
+ explicit FFmpegFileReader(const std::string& filename);
+ virtual ~FFmpegFileReader();
+ virtual bool Initialize();
+ virtual void Read(scoped_refptr<DataBuffer>* output);
+ virtual bool SeekForward(int64 seek_amount_us);
+
+ bool GetFrameRate(int* num, int* denom) const;
+ bool GetWidth(int* width) const;
+ bool GetHeight(int* height) const;
+ bool GetAspectRatio(int* num, int* denom) const;
+ int64 TimeBaseToMicroseconds(int64 time_base_unit) const;
+ int64 MicrosecondsToTimeBase(int64 time_base_unit) const;
+
+ private:
+ void CopyPacketToBuffer(AVPacket* packet, scoped_refptr<DataBuffer>* output);
+
+ std::string filename_;
+ AVFormatContext* format_context_;
+ AVCodecContext* codec_context_;
+ int target_stream_;
+ scoped_ptr<BitstreamConverter> converter_;
+ int64 last_timestamp_;
+
+ DISALLOW_COPY_AND_ASSIGN(FFmpegFileReader);
+};
+
+} // namespace media
+
+#endif // MEDIA_MF_FILE_READER_UTIL_H_
diff --git a/media/video/mft_h264_decode_engine.cc b/media/mf/mft_h264_decoder.cc
index ea59ad6..2c1970d 100644
--- a/media/video/mft_h264_decode_engine.cc
+++ b/media/mf/mft_h264_decoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/mft_h264_decode_engine.h"
+#include "media/mf/mft_h264_decoder.h"
#include <d3d9.h>
#include <dxva2api.h>
@@ -15,8 +15,6 @@
#include "base/time.h"
#include "base/message_loop.h"
-#include "media/base/limits.h"
-#include "media/video/video_decode_context.h"
#pragma comment(lib, "dxva2.lib")
#pragma comment(lib, "d3d9.lib")
@@ -151,42 +149,48 @@ namespace media {
// public methods
-MftH264DecodeEngine::MftH264DecodeEngine(bool use_dxva)
+MftH264Decoder::MftH264Decoder(bool use_dxva, HWND draw_window)
: use_dxva_(use_dxva),
+ d3d9_(NULL),
+ device_(NULL),
+ device_manager_(NULL),
+ draw_window_(draw_window),
+ decoder_(NULL),
+ input_stream_info_(),
+ output_stream_info_(),
state_(kUninitialized),
event_handler_(NULL) {
- memset(&input_stream_info_, 0, sizeof(input_stream_info_));
- memset(&output_stream_info_, 0, sizeof(output_stream_info_));
memset(&config_, 0, sizeof(config_));
memset(&info_, 0, sizeof(info_));
}
-MftH264DecodeEngine::~MftH264DecodeEngine() {
+MftH264Decoder::~MftH264Decoder() {
}
-void MftH264DecodeEngine::Initialize(
+void MftH264Decoder::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
VideoDecodeContext* context,
const VideoCodecConfig& config) {
- DCHECK(!use_dxva_ || context);
+ LOG(INFO) << "MftH264Decoder::Initialize";
if (state_ != kUninitialized) {
LOG(ERROR) << "Initialize: invalid state";
return;
}
if (!message_loop || !event_handler) {
- LOG(ERROR) << "MftH264DecodeEngine::Initialize: parameters cannot be NULL";
+ LOG(ERROR) << "MftH264Decoder::Initialize: parameters cannot be NULL";
return;
}
- context_ = context;
+
config_ = config;
event_handler_ = event_handler;
+
info_.provides_buffers = true;
+ // TODO(jiesun): Actually it is more likely an NV12 D3DSurface9.
+ // Until we have hardware composition working.
if (use_dxva_) {
info_.stream_info.surface_format = VideoFrame::NV12;
- // TODO(hclam): Need to correct this since this is not really GL texture.
- // We should just remove surface_type from stream_info.
info_.stream_info.surface_type = VideoFrame::TYPE_GL_TEXTURE;
} else {
info_.stream_info.surface_format = VideoFrame::YV12;
@@ -198,56 +202,58 @@ void MftH264DecodeEngine::Initialize(
info_.success = InitInternal();
if (info_.success) {
state_ = kNormal;
- AllocFramesFromContext();
- } else {
- LOG(ERROR) << "MftH264DecodeEngine::Initialize failed";
event_handler_->OnInitializeComplete(info_);
+ } else {
+ LOG(ERROR) << "MftH264Decoder::Initialize failed";
}
}
-void MftH264DecodeEngine::Uninitialize() {
+void MftH264Decoder::Uninitialize() {
+ LOG(INFO) << "MftH264Decoder::Uninitialize";
if (state_ == kUninitialized) {
LOG(ERROR) << "Uninitialize: invalid state";
return;
}
- // TODO(hclam): Call ShutdownComLibraries only after MFT is released.
- decode_engine_.Release();
+ // TODO(imcheng):
+ // Cannot shut down COM libraries here because the COM objects still need
+ // to be Release()'ed. We can explicitly release them here, or move the
+ // uninitialize to GpuVideoService...
+ decoder_.Release();
+ device_manager_.Release();
+ device_.Release();
+ d3d9_.Release();
ShutdownComLibraries();
state_ = kUninitialized;
event_handler_->OnUninitializeComplete();
}
-void MftH264DecodeEngine::Flush() {
+void MftH264Decoder::Flush() {
+ LOG(INFO) << "MftH264Decoder::Flush";
if (state_ != kNormal) {
LOG(ERROR) << "Flush: invalid state";
return;
}
state_ = kFlushing;
if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH)) {
- LOG(WARNING) << "MftH264DecodeEngine::Flush failed to send message";
+ LOG(WARNING) << "MftH264Decoder::Flush failed to send message";
}
state_ = kNormal;
event_handler_->OnFlushComplete();
}
-void MftH264DecodeEngine::Seek() {
+void MftH264Decoder::Seek() {
if (state_ != kNormal) {
LOG(ERROR) << "Seek: invalid state";
return;
}
-
- // TODO(hclam): Seriously the logic in VideoRendererBase is flawed that we
- // have to perform the following hack to get playback going.
- for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) {
- event_handler_->ConsumeVideoFrame(output_frames_[0]);
- }
-
+ LOG(INFO) << "MftH264Decoder::Seek";
// Seek not implemented.
event_handler_->OnSeekComplete();
}
-void MftH264DecodeEngine::ConsumeVideoSample(scoped_refptr<Buffer> buffer) {
+void MftH264Decoder::ConsumeVideoSample(scoped_refptr<Buffer> buffer) {
+ LOG(INFO) << "MftH264Decoder::ConsumeVideoSample";
if (state_ == kUninitialized) {
LOG(ERROR) << "ConsumeVideoSample: invalid state";
}
@@ -263,37 +269,39 @@ void MftH264DecodeEngine::ConsumeVideoSample(scoped_refptr<Buffer> buffer) {
if (!sample.get()) {
LOG(ERROR) << "Failed to create an input sample";
} else {
- if (FAILED(decode_engine_->ProcessInput(0, sample.get(), 0))) {
+ if (FAILED(decoder_->ProcessInput(0, sample.get(), 0))) {
event_handler_->OnError();
}
}
} else {
- if (state_ != MftH264DecodeEngine::kEosDrain) {
+ if (state_ != MftH264Decoder::kEosDrain) {
// End of stream, send drain messages.
if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM) ||
!SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN)) {
LOG(ERROR) << "Failed to send EOS / drain messages to MFT";
event_handler_->OnError();
} else {
- state_ = MftH264DecodeEngine::kEosDrain;
+ state_ = MftH264Decoder::kEosDrain;
}
}
}
DoDecode();
}
-void MftH264DecodeEngine::ProduceVideoFrame(scoped_refptr<VideoFrame> frame) {
+void MftH264Decoder::ProduceVideoFrame(scoped_refptr<VideoFrame> frame) {
+ LOG(INFO) << "MftH264Decoder::ProduceVideoFrame";
if (state_ == kUninitialized) {
LOG(ERROR) << "ProduceVideoFrame: invalid state";
return;
}
- event_handler_->ProduceVideoSample(NULL);
+ scoped_refptr<Buffer> buffer;
+ event_handler_->ProduceVideoSample(buffer);
}
// private methods
// static
-bool MftH264DecodeEngine::StartupComLibraries() {
+bool MftH264Decoder::StartupComLibraries() {
HRESULT hr;
hr = CoInitializeEx(NULL,
COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
@@ -312,7 +320,7 @@ bool MftH264DecodeEngine::StartupComLibraries() {
}
// static
-void MftH264DecodeEngine::ShutdownComLibraries() {
+void MftH264Decoder::ShutdownComLibraries() {
HRESULT hr;
hr = MFShutdown();
if (FAILED(hr)) {
@@ -321,85 +329,100 @@ void MftH264DecodeEngine::ShutdownComLibraries() {
CoUninitialize();
}
-bool MftH264DecodeEngine::EnableDxva() {
- IDirect3DDevice9* device = static_cast<IDirect3DDevice9*>(
- context_->GetDevice());
- ScopedComPtr<IDirect3DDeviceManager9> device_manager;
- UINT dev_manager_reset_token = 0;
- HRESULT hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
- device_manager.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Couldn't create D3D Device manager";
+bool MftH264Decoder::CreateD3DDevManager() {
+ CHECK(draw_window_);
+ d3d9_.Attach(Direct3DCreate9(D3D_SDK_VERSION));
+ if (d3d9_.get() == NULL) {
+ LOG(ERROR) << "Failed to create D3D9";
return false;
}
- hr = device_manager->ResetDevice(device, dev_manager_reset_token);
+ D3DPRESENT_PARAMETERS present_params = {0};
+ present_params.BackBufferWidth = 0;
+ present_params.BackBufferHeight = 0;
+ present_params.BackBufferFormat = D3DFMT_UNKNOWN;
+ present_params.BackBufferCount = 1;
+ present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
+ present_params.hDeviceWindow = draw_window_;
+ present_params.Windowed = TRUE;
+ present_params.Flags = D3DPRESENTFLAG_VIDEO;
+ present_params.FullScreen_RefreshRateInHz = 0;
+ present_params.PresentationInterval = 0;
+
+ // D3DCREATE_HARDWARE_VERTEXPROCESSING specifies hardware vertex processing.
+ // (Is it even needed for just video decoding?)
+ HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,
+ D3DDEVTYPE_HAL,
+ draw_window_,
+ (D3DCREATE_HARDWARE_VERTEXPROCESSING |
+ D3DCREATE_MULTITHREADED),
+ &present_params,
+ device_.Receive());
if (FAILED(hr)) {
- LOG(ERROR) << "Failed to reset device";
+ LOG(ERROR) << "Failed to create D3D Device";
return false;
}
- hr = decode_engine_->ProcessMessage(
- MFT_MESSAGE_SET_D3D_MANAGER,
- reinterpret_cast<ULONG_PTR>(device_manager.get()));
+ UINT dev_manager_reset_token = 0;
+ hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
+ device_manager_.Receive());
if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set D3D9 device manager to decoder "
- << std::hex << hr;
+ LOG(ERROR) << "Couldn't create D3D Device manager";
return false;
}
+ hr = device_manager_->ResetDevice(device_.get(),
+ dev_manager_reset_token);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set device to device manager";
+ return false;
+ }
return true;
}
-bool MftH264DecodeEngine::InitInternal() {
+bool MftH264Decoder::InitInternal() {
if (!StartupComLibraries())
return false;
- if (!InitDecodeEngine())
+ if (use_dxva_ && !CreateD3DDevManager())
+ return false;
+ if (!InitDecoder())
return false;
if (!GetStreamsInfoAndBufferReqs())
return false;
return SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING);
}
-bool MftH264DecodeEngine::InitDecodeEngine() {
+bool MftH264Decoder::InitDecoder() {
// TODO(jiesun): use MFEnum to get decoder CLSID.
HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
NULL,
CLSCTX_INPROC_SERVER,
__uuidof(IMFTransform),
- reinterpret_cast<void**>(
- decode_engine_.Receive()));
- if (FAILED(hr) || !decode_engine_.get()) {
+ reinterpret_cast<void**>(decoder_.Receive()));
+ if (FAILED(hr) || !decoder_.get()) {
LOG(ERROR) << "CoCreateInstance failed " << std::hex << std::showbase << hr;
return false;
}
- if (!CheckDecodeEngineDxvaSupport())
- return false;
- if (use_dxva_ && !EnableDxva())
- return false;
- return SetDecodeEngineMediaTypes();
-}
-void MftH264DecodeEngine::AllocFramesFromContext() {
- if (!use_dxva_)
- return;
+ if (!CheckDecoderDxvaSupport())
+ return false;
- // TODO(imcheng): Pass in an actual task. (From EventHandler?)
- context_->ReleaseAllVideoFrames();
- output_frames_.clear();
- context_->AllocateVideoFrames(
- 1, info_.stream_info.surface_width, info_.stream_info.surface_height,
- VideoFrame::RGBA, &output_frames_,
- NewRunnableMethod(this, &MftH264DecodeEngine::OnAllocFramesDone));
-}
+ if (use_dxva_) {
+ hr = decoder_->ProcessMessage(
+ MFT_MESSAGE_SET_D3D_MANAGER,
+ reinterpret_cast<ULONG_PTR>(device_manager_.get()));
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set D3D9 device to decoder " << std::hex << hr;
+ return false;
+ }
+ }
-void MftH264DecodeEngine::OnAllocFramesDone() {
- event_handler_->OnInitializeComplete(info_);
+ return SetDecoderMediaTypes();
}
-bool MftH264DecodeEngine::CheckDecodeEngineDxvaSupport() {
+bool MftH264Decoder::CheckDecoderDxvaSupport() {
ScopedComPtr<IMFAttributes> attributes;
- HRESULT hr = decode_engine_->GetAttributes(attributes.Receive());
+ HRESULT hr = decoder_->GetAttributes(attributes.Receive());
if (FAILED(hr)) {
LOG(ERROR) << "Unlock: Failed to get attributes, hr = "
<< std::hex << std::showbase << hr;
@@ -417,14 +440,14 @@ bool MftH264DecodeEngine::CheckDecodeEngineDxvaSupport() {
return true;
}
-bool MftH264DecodeEngine::SetDecodeEngineMediaTypes() {
- if (!SetDecodeEngineInputMediaType())
+bool MftH264Decoder::SetDecoderMediaTypes() {
+ if (!SetDecoderInputMediaType())
return false;
- return SetDecodeEngineOutputMediaType(
- ConvertVideoFrameFormatToGuid(info_.stream_info.surface_format));
+ return SetDecoderOutputMediaType(ConvertVideoFrameFormatToGuid(
+ info_.stream_info.surface_format));
}
-bool MftH264DecodeEngine::SetDecodeEngineInputMediaType() {
+bool MftH264Decoder::SetDecoderInputMediaType() {
ScopedComPtr<IMFMediaType> media_type;
HRESULT hr = MFCreateMediaType(media_type.Receive());
if (FAILED(hr)) {
@@ -444,7 +467,7 @@ bool MftH264DecodeEngine::SetDecodeEngineInputMediaType() {
return false;
}
- hr = decode_engine_->SetInputType(0, media_type.get(), 0); // No flags
+ hr = decoder_->SetInputType(0, media_type.get(), 0); // No flags
if (FAILED(hr)) {
LOG(ERROR) << "Failed to set decoder's input type";
return false;
@@ -453,12 +476,11 @@ bool MftH264DecodeEngine::SetDecodeEngineInputMediaType() {
return true;
}
-bool MftH264DecodeEngine::SetDecodeEngineOutputMediaType(const GUID subtype) {
+bool MftH264Decoder::SetDecoderOutputMediaType(const GUID subtype) {
DWORD i = 0;
IMFMediaType* out_media_type;
bool found = false;
- while (SUCCEEDED(decode_engine_->GetOutputAvailableType(0, i,
- &out_media_type))) {
+ while (SUCCEEDED(decoder_->GetOutputAvailableType(0, i, &out_media_type))) {
GUID out_subtype;
HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
if (FAILED(hr)) {
@@ -467,7 +489,7 @@ bool MftH264DecodeEngine::SetDecodeEngineOutputMediaType(const GUID subtype) {
continue;
}
if (out_subtype == subtype) {
- hr = decode_engine_->SetOutputType(0, out_media_type, 0); // No flags
+ hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags
hr = MFGetAttributeSize(out_media_type, MF_MT_FRAME_SIZE,
reinterpret_cast<UINT32*>(&info_.stream_info.surface_width),
reinterpret_cast<UINT32*>(&info_.stream_info.surface_height));
@@ -487,8 +509,8 @@ bool MftH264DecodeEngine::SetDecodeEngineOutputMediaType(const GUID subtype) {
return false;
}
-bool MftH264DecodeEngine::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
- HRESULT hr = decode_engine_->ProcessMessage(msg, NULL);
+bool MftH264Decoder::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
+ HRESULT hr = decoder_->ProcessMessage(msg, NULL);
return SUCCEEDED(hr);
}
@@ -497,8 +519,8 @@ bool MftH264DecodeEngine::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
// The MFT will not allocate buffer for neither input nor output, so we have
// to do it ourselves and make sure they're the correct size.
// Exception is when dxva is enabled, the decoder will allocate output.
-bool MftH264DecodeEngine::GetStreamsInfoAndBufferReqs() {
- HRESULT hr = decode_engine_->GetInputStreamInfo(0, &input_stream_info_);
+bool MftH264Decoder::GetStreamsInfoAndBufferReqs() {
+ HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
if (FAILED(hr)) {
LOG(ERROR) << "Failed to get input stream info";
return false;
@@ -516,7 +538,7 @@ bool MftH264DecodeEngine::GetStreamsInfoAndBufferReqs() {
LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment;
- hr = decode_engine_->GetOutputStreamInfo(0, &output_stream_info_);
+ hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
if (FAILED(hr)) {
LOG(ERROR) << "Failed to get output stream info";
return false;
@@ -534,7 +556,7 @@ bool MftH264DecodeEngine::GetStreamsInfoAndBufferReqs() {
return true;
}
-bool MftH264DecodeEngine::DoDecode() {
+bool MftH264Decoder::DoDecode() {
if (state_ != kNormal && state_ != kEosDrain) {
LOG(ERROR) << "DoDecode: not in normal or drain state";
return false;
@@ -557,10 +579,10 @@ bool MftH264DecodeEngine::DoDecode() {
output_data_buffer.pSample = output_sample;
DWORD status;
- HRESULT hr = decode_engine_->ProcessOutput(0, // No flags
- 1, // # of out streams to pull
- &output_data_buffer,
- &status);
+ HRESULT hr = decoder_->ProcessOutput(0, // No flags
+ 1, // # of out streams to pull from
+ &output_data_buffer,
+ &status);
IMFCollection* events = output_data_buffer.pEvents;
if (events != NULL) {
@@ -570,15 +592,10 @@ bool MftH264DecodeEngine::DoDecode() {
if (FAILED(hr)) {
if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
- hr = SetDecodeEngineOutputMediaType(
- ConvertVideoFrameFormatToGuid(info_.stream_info.surface_format));
+ hr = SetDecoderOutputMediaType(ConvertVideoFrameFormatToGuid(
+ info_.stream_info.surface_format));
if (SUCCEEDED(hr)) {
- // TODO(hclam): Need to fix this case. This happens when we have a
- // format change. We have to resume decoding only after we have
- // allocated a new set of video frames.
- // AllocFramesFromContext();
- // event_handler_->OnFormatChange(info_.stream_info);
- event_handler_->ProduceVideoSample(NULL);
+ event_handler_->OnFormatChange(info_.stream_info);
return true;
} else {
event_handler_->OnError();
@@ -590,14 +607,13 @@ bool MftH264DecodeEngine::DoDecode() {
scoped_refptr<VideoFrame> frame;
VideoFrame::CreateEmptyFrame(&frame);
event_handler_->ConsumeVideoFrame(frame);
- state_ = MftH264DecodeEngine::kStopped;
+ state_ = MftH264Decoder::kStopped;
return false;
}
- event_handler_->ProduceVideoSample(NULL);
return true;
} else {
LOG(ERROR) << "Unhandled error in DoDecode()";
- state_ = MftH264DecodeEngine::kStopped;
+ state_ = MftH264Decoder::kStopped;
event_handler_->OnError();
return false;
}
@@ -639,25 +655,27 @@ bool MftH264DecodeEngine::DoDecode() {
LOG(ERROR) << "Failed to get buffer from sample";
return true;
}
+
+
+
if (use_dxva_) {
- ScopedComPtr<IDirect3DSurface9, &IID_IDirect3DSurface9> surface;
+ ScopedComPtr<IDirect3DSurface9> surface;
hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
IID_PPV_ARGS(surface.Receive()));
if (FAILED(hr)) {
LOG(ERROR) << "Failed to get surface from buffer";
return true;
}
- // Since we only allocated 1 frame from context.
- // TODO(imcheng): Detect error.
- output_frames_[0]->SetTimestamp(TimeDelta::FromMicroseconds(timestamp));
- output_frames_[0]->SetDuration(TimeDelta::FromMicroseconds(duration));
- context_->UploadToVideoFrame(
- surface.get(), output_frames_[0],
- NewRunnableMethod(this, &MftH264DecodeEngine::OnUploadVideoFrameDone,
- surface, output_frames_[0]));
+
+ if (!frame.get()) {
+ LOG(ERROR) << "Failed to allocate video frame for d3d texture";
+ event_handler_->OnError();
return true;
+ }
+
+ // The reference is now in the VideoFrame.
+ surface.Detach();
} else {
- // TODO(hclam): Remove this branch.
// Not DXVA.
VideoFrame::CreateFrame(info_.stream_info.surface_format,
info_.stream_info.surface_width,
@@ -679,18 +697,10 @@ bool MftH264DecodeEngine::DoDecode() {
memcpy(dst_y, src_y, current_length);
CHECK(SUCCEEDED(output_buffer->Unlock()));
- event_handler_->ConsumeVideoFrame(frame);
- return true;
}
-}
-
-void MftH264DecodeEngine::OnUploadVideoFrameDone(
- ScopedComPtr<IDirect3DSurface9, &IID_IDirect3DSurface9> surface,
- scoped_refptr<media::VideoFrame> frame) {
- // After this method is exited the reference to surface is released.
+ // TODO(jiesun): non-System memory case
event_handler_->ConsumeVideoFrame(frame);
+ return true;
}
} // namespace media
-
-DISABLE_RUNNABLE_METHOD_REFCOUNT(media::MftH264DecodeEngine);
diff --git a/media/video/mft_h264_decode_engine.h b/media/mf/mft_h264_decoder.h
index e13dce9..57c9e9f 100644
--- a/media/video/mft_h264_decode_engine.h
+++ b/media/mf/mft_h264_decoder.h
@@ -2,33 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// MFT H.264 decode engine.
+// MFT H.264 decoder.
-#ifndef MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_H_
-#define MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_H_
+#ifndef MEDIA_MF_MFT_H264_DECODER_H_
+#define MEDIA_MF_MFT_H264_DECODER_H_
-// TODO(imcheng): Get rid of this header by:
-// - forward declaring IMFTransform and its IID as in
-// mft_h264_decode_engine_context.h
-// - turning the general SendMFTMessage method into specific methods
-// (SendFlushMessage, SendDrainMessage, etc.) to avoid having
-// MFT_MESSAGE_TYPE in here
+#include "build/build_config.h" // For OS_WIN.
+
+#if defined(OS_WIN)
+
+#include <d3d9.h>
+#include <dxva2api.h>
#include <mfidl.h>
#include "base/gtest_prod_util.h"
#include "base/scoped_comptr_win.h"
#include "media/video/video_decode_engine.h"
-struct IDirect3DSurface9;
-extern "C" const GUID IID_IDirect3DSurface9;
-
class MessageLoop;
namespace media {
-class VideoDecodeContext;
-
-class MftH264DecodeEngine : public media::VideoDecodeEngine {
+class MftH264Decoder : public media::VideoDecodeEngine {
public:
typedef enum {
kUninitialized, // un-initialized.
@@ -38,12 +33,10 @@ class MftH264DecodeEngine : public media::VideoDecodeEngine {
kStopped, // upon output EOS received.
} State;
- explicit MftH264DecodeEngine(bool use_dxva);
- virtual ~MftH264DecodeEngine();
-
- // VideoDecodeEngine implementation.
+ explicit MftH264Decoder(bool use_dxva, HWND draw_window);
+ ~MftH264Decoder();
virtual void Initialize(MessageLoop* message_loop,
- media::VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeEngine::EventHandler* event_handler,
VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void Uninitialize();
@@ -53,34 +46,37 @@ class MftH264DecodeEngine : public media::VideoDecodeEngine {
virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
bool use_dxva() const { return use_dxva_; }
+ IDirect3DDevice9* device() const { return device_.get(); }
State state() const { return state_; }
private:
- friend class MftH264DecodeEngineTest;
- FRIEND_TEST_ALL_PREFIXES(MftH264DecodeEngineTest, LibraryInit);
+ friend class MftH264DecoderTest;
+ FRIEND_TEST_ALL_PREFIXES(MftH264DecoderTest, LibraryInit);
// TODO(jiesun): Find a way to move all these to GpuVideoService..
static bool StartupComLibraries();
static void ShutdownComLibraries();
- bool EnableDxva();
+ bool CreateD3DDevManager();
bool InitInternal();
- bool InitDecodeEngine();
- void AllocFramesFromContext();
- bool CheckDecodeEngineDxvaSupport();
- bool SetDecodeEngineMediaTypes();
- bool SetDecodeEngineInputMediaType();
- bool SetDecodeEngineOutputMediaType(const GUID subtype);
+ bool InitDecoder();
+ bool CheckDecoderDxvaSupport();
+ bool SetDecoderMediaTypes();
+ bool SetDecoderInputMediaType();
+ bool SetDecoderOutputMediaType(const GUID subtype);
bool SendMFTMessage(MFT_MESSAGE_TYPE msg);
bool GetStreamsInfoAndBufferReqs();
+
bool DoDecode();
- void OnAllocFramesDone();
- void OnUploadVideoFrameDone(
- ScopedComPtr<IDirect3DSurface9, &IID_IDirect3DSurface9> surface,
- scoped_refptr<media::VideoFrame> frame);
+
bool use_dxva_;
- ScopedComPtr<IMFTransform> decode_engine_;
+
+ ScopedComPtr<IDirect3D9> d3d9_;
+ ScopedComPtr<IDirect3DDevice9> device_;
+ ScopedComPtr<IDirect3DDeviceManager9> device_manager_;
+ HWND draw_window_;
+ ScopedComPtr<IMFTransform> decoder_;
MFT_INPUT_STREAM_INFO input_stream_info_;
MFT_OUTPUT_STREAM_INFO output_stream_info_;
@@ -91,12 +87,11 @@ class MftH264DecodeEngine : public media::VideoDecodeEngine {
VideoCodecConfig config_;
VideoCodecInfo info_;
- VideoDecodeContext* context_;
- std::vector<scoped_refptr<VideoFrame> > output_frames_;
-
- DISALLOW_COPY_AND_ASSIGN(MftH264DecodeEngine);
+ DISALLOW_COPY_AND_ASSIGN(MftH264Decoder);
};
} // namespace media
-#endif // MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_H_
+#endif // defined(OS_WIN)
+
+#endif // MEDIA_MF_MFT_H264_DECODER_H_
diff --git a/media/mf/mft_h264_decoder_example.cc b/media/mf/mft_h264_decoder_example.cc
new file mode 100644
index 0000000..b5b6b10
--- /dev/null
+++ b/media/mf/mft_h264_decoder_example.cc
@@ -0,0 +1,421 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Demonstrates the use of MftH264Decoder.
+
+#include <cstdio>
+
+#include <string>
+
+#include <d3d9.h>
+#include <dxva2api.h>
+
+#include "base/at_exit.h"
+#include "base/command_line.h"
+#include "base/file_path.h"
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/scoped_comptr_win.h"
+#include "base/scoped_ptr.h"
+#include "base/time.h"
+#include "media/base/data_buffer.h"
+#include "media/base/media.h"
+#include "media/base/video_frame.h"
+#include "media/base/yuv_convert.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/ffmpeg/file_protocol.h"
+#include "media/mf/file_reader_util.h"
+#include "media/mf/mft_h264_decoder.h"
+
+using base::AtExitManager;
+using base::Time;
+using base::TimeDelta;
+using media::Buffer;
+using media::DataBuffer;
+using media::FFmpegFileReader;
+using media::MftH264Decoder;
+using media::VideoCodecConfig;
+using media::VideoCodecInfo;
+using media::VideoDecodeEngine;
+using media::VideoFrame;
+using media::VideoStreamInfo;
+
+namespace {
+
// Window class / title used for the example's output window.
const wchar_t* const kWindowClass = L"Chrome_H264_MFT";
const wchar_t* const kWindowTitle = L"H264_MFT";
// Standard overlapped, initially-visible window with resizing and
// maximizing disabled (the video is rendered at a fixed size).
const int kWindowStyleFlags = (WS_OVERLAPPEDWINDOW | WS_VISIBLE) &
    ~(WS_MAXIMIZEBOX | WS_THICKFRAME);
+
// Prints command-line usage for this example program to stderr.
void usage() {
  // A string literal is const data: binding it to a non-const char* was
  // deprecated in C++03 and is ill-formed in C++11, so declare the pointer
  // (and pointee) const.
  static const char* const usage_msg =
      "Usage: mft_h264_decoder [--enable-dxva] [--render] --input-file=FILE\n"
      "enable-dxva: Enables hardware accelerated decoding\n"
      "render: Render to window\n"
      "During rendering, press spacebar to skip forward at least 5 seconds.\n"
      "To display this message: mft_h264_decoder --help";
  fprintf(stderr, "%s\n", usage_msg);
}
+
// Loads the media library and registers FFmpeg codecs and the custom file
// protocol used by FFmpegFileReader. Returns false if the media library
// fails to load.
// NOTE(review): the call sequence (avcodec_init, then av_register_all,
// then protocol registration) mirrors FFmpeg's init requirements -- do not
// reorder without checking the FFmpeg docs for this vintage.
static bool InitFFmpeg() {
  if (!media::InitializeMediaLibrary(FilePath()))
    return false;
  avcodec_init();
  av_register_all();
  av_register_protocol2(&kFFmpegFileProtocol, sizeof(kFFmpegFileProtocol));
  return true;
}
+
+// Creates a window with the given width and height.
+// Returns: A handle to the window on success, NULL otherwise.
+static HWND CreateDrawWindow(int width, int height) {
+ WNDCLASS window_class = {0};
+ window_class.lpszClassName = kWindowClass;
+ window_class.hInstance = NULL;
+ window_class.hbrBackground = 0;
+ window_class.lpfnWndProc = DefWindowProc;
+ window_class.hCursor = 0;
+
+ RegisterClass(&window_class);
+
+ HWND window = CreateWindow(kWindowClass,
+ kWindowTitle,
+ kWindowStyleFlags,
+ 100,
+ 100,
+ width,
+ height,
+ NULL,
+ NULL,
+ NULL,
+ NULL);
+ if (window == NULL) {
+ LOG(ERROR) << "Failed to create window";
+ return NULL;
+ }
+ RECT rect;
+ rect.left = 0;
+ rect.right = width;
+ rect.top = 0;
+ rect.bottom = height;
+ AdjustWindowRect(&rect, kWindowStyleFlags, FALSE);
+ MoveWindow(window, 0, 0, rect.right - rect.left, rect.bottom - rect.top,
+ TRUE);
+ return window;
+}
+
// Watches the UI message pump so keyboard input can drive playback:
// pressing the spacebar flushes the decoder and seeks the reader forward.
class WindowObserver : public base::MessagePumpWin::Observer {
 public:
  WindowObserver(FFmpegFileReader* reader, MftH264Decoder* decoder)
      : reader_(reader),
        decoder_(decoder) {
  }

  virtual void WillProcessMessage(const MSG& msg) {
    if (msg.message == WM_CHAR && msg.wParam == ' ') {
      // Seek forward 5 seconds (5,000,000 microseconds). Flush first so
      // frames queued inside the MFT are dropped before the jump.
      decoder_->Flush();
      reader_->SeekForward(5000000);
    }
  }

  virtual void DidProcessMessage(const MSG& msg) {
  }

 private:
  // Raw pointers; lifetime is managed by the caller (see Run()).
  FFmpegFileReader* reader_;
  MftH264Decoder* decoder_;
};
+
// Event handler that drives MftH264Decoder through an entire stream as fast
// as possible (no rendering), counting frames read and decoded. Also serves
// as the base class for RenderToWindowHandler.
class MftH264DecoderHandler
    : public VideoDecodeEngine::EventHandler,
      public base::RefCountedThreadSafe<MftH264DecoderHandler> {
 public:
  MftH264DecoderHandler() : frames_read_(0), frames_decoded_(0) {
    memset(&info_, 0, sizeof(info_));
  }
  virtual ~MftH264DecoderHandler() {}
  // Remembers the codec info the decoder reports at init time.
  virtual void OnInitializeComplete(const VideoCodecInfo& info) {
    info_ = info;
  }
  virtual void OnUninitializeComplete() {
  }
  virtual void OnFlushComplete() {
  }
  virtual void OnSeekComplete() {}
  virtual void OnError() {}
  virtual void OnFormatChange(VideoStreamInfo stream_info) {
    info_.stream_info = stream_info;
  }
  // Decoder pull path: reads the next compressed buffer from the file and
  // feeds it back into the decoder.
  // NOTE(review): |input| is dereferenced without a null check -- assumes
  // FFmpegFileReader::Read() always produces a buffer; confirm its contract.
  virtual void ProduceVideoSample(scoped_refptr<Buffer> buffer) {
    if (reader_ && decoder_) {
      scoped_refptr<DataBuffer> input;
      reader_->Read(&input);
      if (!input->IsEndOfStream())
        frames_read_++;
      decoder_->ConsumeVideoSample(input);
    }
  }
  // Counts every decoded frame that is not the EMPTY end-of-stream marker.
  virtual void ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
    if (frame.get()) {
      if (frame->format() != VideoFrame::EMPTY) {
        frames_decoded_++;
      }
    }
  }
  virtual void SetReader(FFmpegFileReader* reader) {
    reader_ = reader;
  }
  virtual void SetDecoder(MftH264Decoder* decoder) {
    decoder_= decoder;
  }
  // Requests one output frame; the decoder may pull input via
  // ProduceVideoSample() while satisfying the request.
  virtual void DecodeSingleFrame() {
    scoped_refptr<VideoFrame> frame;
    decoder_->ProduceVideoFrame(frame);
  }
  // Synchronously decodes until the decoder reports kStopped.
  virtual void Start() {
    while (decoder_->state() != MftH264Decoder::kStopped)
      DecodeSingleFrame();
  }

  VideoCodecInfo info_;
  int frames_read_;
  int frames_decoded_;
  // Raw pointers; owned by the caller (see Run()).
  FFmpegFileReader* reader_;
  MftH264Decoder* decoder_;
};
+
// Handler that renders each decoded frame to |window_|, pacing decode
// requests by each frame's duration via the UI message loop. Software
// frames are color-converted and blitted with GDI; DXVA frames are
// stretched from their D3D surface to the back buffer.
class RenderToWindowHandler : public MftH264DecoderHandler {
 public:
  RenderToWindowHandler(HWND window, MessageLoop* loop)
      : MftH264DecoderHandler(),
        window_(window),
        loop_(loop),
        has_output_(false) {
  }
  virtual ~RenderToWindowHandler() {}
  // Converts a YV12 frame to RGB32 and paints it into the window with GDI.
  // Returns false if the blit failed.
  bool RenderSoftwareFrame(scoped_refptr<VideoFrame> frame) {
    int width = frame->width();
    int height = frame->height();

    // Assume height does not change.
    // NOTE(review): this buffer is sized from the *first* frame's
    // height/stride, never freed, and silently overflows if a later frame
    // is larger -- acceptable for an example tool, not for production.
    static uint8* rgb_frame = new uint8[height * frame->stride(0) * 4];
    uint8* frame_y = static_cast<uint8*>(frame->data(VideoFrame::kYPlane));
    uint8* frame_u = static_cast<uint8*>(frame->data(VideoFrame::kUPlane));
    uint8* frame_v = static_cast<uint8*>(frame->data(VideoFrame::kVPlane));
    // V is passed before U -- presumably matching YV12 plane order; confirm
    // against ConvertYUVToRGB32's parameter contract.
    media::ConvertYUVToRGB32(frame_y, frame_v, frame_u, rgb_frame,
                             width, height,
                             frame->stride(0), frame->stride(1),
                             4 * frame->stride(0), media::YV12);
    PAINTSTRUCT ps;
    InvalidateRect(window_, NULL, TRUE);
    HDC hdc = BeginPaint(window_, &ps);
    BITMAPINFOHEADER hdr;
    hdr.biSize = sizeof(BITMAPINFOHEADER);
    hdr.biWidth = width;
    hdr.biHeight = -height; // minus means top-down bitmap
    hdr.biPlanes = 1;
    hdr.biBitCount = 32;
    hdr.biCompression = BI_RGB; // no compression
    hdr.biSizeImage = 0;
    hdr.biXPelsPerMeter = 1;
    hdr.biYPelsPerMeter = 1;
    hdr.biClrUsed = 0;
    hdr.biClrImportant = 0;
    int rv = StretchDIBits(hdc, 0, 0, width, height, 0, 0, width, height,
                           rgb_frame, reinterpret_cast<BITMAPINFO*>(&hdr),
                           DIB_RGB_COLORS, SRCCOPY);
    EndPaint(window_, &ps);
    return rv != 0;
  }
  // Presents a DXVA-decoded frame: clears the target, stretches the frame's
  // D3D surface onto the back buffer and presents it. Gives up (and quits
  // the loop) after 10 consecutive Present() failures.
  bool RenderD3dSurface(scoped_refptr<VideoFrame> frame) {
    ScopedComPtr<IDirect3DSurface9> surface;
    IDirect3DDevice9* device = decoder_->device();
    // TODO(hclam): Comment this since this file will be removed later.
    // surface.Attach(static_cast<IDirect3DSurface9*>(frame->d3d_texture(0)));
    HRESULT hr;
    hr = device->Clear(0, NULL, D3DCLEAR_TARGET, D3DCOLOR_XRGB(0, 0, 0),
                       1.0f, 0);
    if (FAILED(hr)) {
      LOG(ERROR) << "Device->Clear() failed";
      return false;
    }
    ScopedComPtr<IDirect3DSurface9> backbuffer;
    hr = device->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO,
                               backbuffer.Receive());
    if (FAILED(hr)) {
      LOG(ERROR) << "Device->GetBackBuffer() failed";
      return false;
    }
    hr = device->StretchRect(surface.get(), NULL, backbuffer.get(), NULL,
                             D3DTEXF_NONE);
    if (FAILED(hr)) {
      LOG(ERROR) << "Device->StretchRect() failed";
      return false;
    }
    hr = device->Present(NULL, NULL, NULL, NULL);
    if (FAILED(hr)) {
      if (hr == E_FAIL) {
        LOG(WARNING) << "Present() returned E_FAIL";
      } else {
        static int frames_dropped = 0;
        LOG(ERROR) << "Device->Present() failed "
                   << std::hex << std::showbase << hr;
        if (++frames_dropped == 10) {
          LOG(ERROR) << "Dropped too many frames, quitting";
          MessageLoopForUI::current()->QuitNow();
          return false;
        }
      }
    }
    return true;
  }
  // Renders the frame, then schedules the next decode after the frame's
  // duration so playback runs at roughly real-time speed. An EMPTY frame
  // signals end of stream and quits the loop.
  virtual void ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
    has_output_ = true;
    if (frame.get()) {
      if (frame->format() != VideoFrame::EMPTY) {
        frames_decoded_++;
        loop_->PostDelayedTask(
            FROM_HERE,
            NewRunnableMethod(this, &RenderToWindowHandler::DecodeSingleFrame),
            frame->GetDuration().InMilliseconds());
        bool success;
        if (decoder_->use_dxva()) {
          success = RenderD3dSurface(frame);
        } else {
          success = RenderSoftwareFrame(frame);
        }
        if (!success) {
          LOG(ERROR) << "Render failed";
          loop_->QuitNow();
        }
      } else {  // if frame is type EMPTY, there will be no more frames.
        loop_->QuitNow();
      }
    }
  }
  // Pumps the decoder until it actually emits a frame (ConsumeVideoFrame
  // sets |has_output_|) or stops; quits the loop once the decoder stops.
  virtual void DecodeSingleFrame() {
    if (decoder_->state() != MftH264Decoder::kStopped) {
      while (decoder_->state() != MftH264Decoder::kStopped && !has_output_) {
        scoped_refptr<VideoFrame> frame;
        decoder_->ProduceVideoFrame(frame);
      }
      if (decoder_->state() == MftH264Decoder::kStopped)
        loop_->QuitNow();
      has_output_ = false;
    } else {
      loop_->QuitNow();
    }
  }
  // Kicks off the first decode and runs the message loop until playback
  // finishes or fails.
  virtual void Start() {
    loop_->PostTask(
        FROM_HERE,
        NewRunnableMethod(this, &RenderToWindowHandler::DecodeSingleFrame));
    loop_->Run();
  }

 private:
  HWND window_;
  MessageLoop* loop_;
  // True when ConsumeVideoFrame() has run since the last decode request.
  bool has_output_;
};
+
+static int Run(bool use_dxva, bool render, const std::string& input_file) {
+ scoped_ptr<FFmpegFileReader> reader(new FFmpegFileReader(input_file));
+ if (reader.get() == NULL || !reader->Initialize()) {
+ LOG(ERROR) << "Failed to create/initialize reader";
+ return -1;
+ }
+ int width = 0, height = 0;
+ if (!reader->GetWidth(&width) || !reader->GetHeight(&height)) {
+ LOG(WARNING) << "Failed to get width/height from reader";
+ }
+ VideoCodecConfig config;
+ config.width = width;
+ config.height = height;
+ HWND window = NULL;
+ if (use_dxva || render) {
+ window = CreateDrawWindow(width, height);
+ if (!render)
+ ShowWindow(window, SW_HIDE);
+ if (window == NULL) {
+ LOG(ERROR) << "Failed to create window";
+ return -1;
+ }
+ }
+
+ scoped_ptr<MftH264Decoder> mft(new MftH264Decoder(use_dxva, window));
+ if (!mft.get()) {
+ LOG(ERROR) << "Failed to create MFT";
+ return -1;
+ }
+
+ scoped_refptr<MftH264DecoderHandler> handler;
+ if (render)
+ handler = new RenderToWindowHandler(window, MessageLoop::current());
+ else
+ handler = new MftH264DecoderHandler();
+ handler->SetDecoder(mft.get());
+ handler->SetReader(reader.get());
+ if (!handler.get()) {
+ LOG(ERROR) << "Failed to create handler";
+ return -1;
+ }
+
+ mft->Initialize(MessageLoop::current(), handler.get(), NULL, config);
+ scoped_ptr<WindowObserver> observer;
+ if (render) {
+ observer.reset(new WindowObserver(reader.get(), mft.get()));
+ MessageLoopForUI::current()->AddObserver(observer.get());
+ }
+
+ Time decode_start(Time::Now());
+ handler->Start();
+ TimeDelta decode_time = Time::Now() - decode_start;
+
+ printf("All done, frames read: %d, frames decoded: %d\n",
+ handler->frames_read_, handler->frames_decoded_);
+ printf("Took %lldms\n", decode_time.InMilliseconds());
+ if (window)
+ DestroyWindow(window);
+ return 0;
+}
+
+} // namespace
+
+int main(int argc, char** argv) {
+ AtExitManager at_exit;
+ MessageLoopForUI message_loop;
+ CommandLine::Init(argc, argv);
+ if (argc == 1) {
+ fprintf(stderr, "Not enough arguments\n");
+ usage();
+ return -1;
+ }
+ const CommandLine& cmd_line = *CommandLine::ForCurrentProcess();
+ if (cmd_line.HasSwitch("help")) {
+ usage();
+ return -1;
+ }
+ bool use_dxva = cmd_line.HasSwitch("enable-dxva");
+ bool render = cmd_line.HasSwitch("render");
+ std::string input_file = cmd_line.GetSwitchValueASCII("input-file");
+ if (input_file.empty()) {
+ fprintf(stderr, "No input file provided\n");
+ usage();
+ return -1;
+ }
+ printf("enable-dxva: %d\n", use_dxva);
+ printf("render: %d\n", render);
+ printf("input-file: %s\n", input_file.c_str());
+
+ if (!InitFFmpeg()) {
+ LOG(ERROR) << "InitFFMpeg() failed";
+ return -1;
+ }
+ int ret = Run(use_dxva, render, input_file);
+
+ printf("Done\n");
+ return ret;
+}
diff --git a/media/mf/test/mft_h264_decoder_unittest.cc b/media/mf/test/mft_h264_decoder_unittest.cc
new file mode 100644
index 0000000..11959f7
--- /dev/null
+++ b/media/mf/test/mft_h264_decoder_unittest.cc
@@ -0,0 +1,460 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_path.h"
+#include "base/file_util.h"
+#include "base/message_loop.h"
+#include "base/path_service.h"
+#include "base/ref_counted.h"
+#include "base/scoped_ptr.h"
+#include "base/string_util.h"
+#include "base/time.h"
+#include "media/base/data_buffer.h"
+#include "media/base/video_frame.h"
+#include "media/mf/file_reader_util.h"
+#include "media/mf/mft_h264_decoder.h"
+#include "media/video/video_decode_engine.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeDelta;
+
+namespace media {
+
// Largest picture dimensions the decoder is expected to clamp to when the
// configured size is invalid or too large. (1088 rather than 1080 --
// presumably the height rounded up to a multiple of 16-pixel macroblocks;
// confirm against the decoder implementation.)
static const int kDecoderMaxWidth = 1920;
static const int kDecoderMaxHeight = 1088;
+
+static HWND CreateDrawWindow(int width, int height) {
+ static const wchar_t kClassName[] = L"Test";
+ static const wchar_t kWindowTitle[] = L"MFT Unittest Draw Window";
+ WNDCLASS window_class = {0};
+ window_class.lpszClassName = kClassName;
+ window_class.hInstance = NULL;
+ window_class.hbrBackground = 0;
+ window_class.lpfnWndProc = DefWindowProc;
+ window_class.hCursor = 0;
+
+ RegisterClass(&window_class);
+
+ HWND window = CreateWindow(kClassName,
+ kWindowTitle,
+ (WS_OVERLAPPEDWINDOW | WS_VISIBLE) &
+ ~(WS_MAXIMIZEBOX | WS_THICKFRAME),
+ 100,
+ 100,
+ width,
+ height,
+ NULL,
+ NULL,
+ NULL,
+ NULL);
+ if (window == NULL) {
+ LOG(ERROR) << "Failed to create window";
+ return NULL;
+ }
+ return window;
+}
+
// Abstract input source for the decoder tests. Subclasses supply
// compressed buffers (or an end-of-stream buffer) via ReadCallback().
class BaseMftReader : public base::RefCountedThreadSafe<BaseMftReader> {
 public:
  virtual ~BaseMftReader() {}
  // Produces the next input buffer in |*input|.
  virtual void ReadCallback(scoped_refptr<DataBuffer>* input) = 0;
};
+
+class FakeMftReader : public BaseMftReader {
+ public:
+ FakeMftReader() : frames_remaining_(20) {}
+ explicit FakeMftReader(int count) : frames_remaining_(count) {}
+ virtual ~FakeMftReader() {}
+
+ // Provides garbage input to the decoder.
+ virtual void ReadCallback(scoped_refptr<DataBuffer>* input) {
+ if (frames_remaining_ > 0) {
+ int sz = 4096;
+ uint8* buf = new uint8[sz];
+ memset(buf, 42, sz);
+ *input = new DataBuffer(buf, sz);
+ (*input)->SetDuration(base::TimeDelta::FromMicroseconds(5000));
+ (*input)->SetTimestamp(
+ base::TimeDelta::FromMicroseconds(
+ 50000000 - frames_remaining_ * 10000));
+ --frames_remaining_;
+ } else {
+ // Emulate end of stream on the last "frame".
+ *input = new DataBuffer(0);
+ }
+ }
+ int frames_remaining() const { return frames_remaining_; }
+
+ private:
+ int frames_remaining_;
+};
+
+class FFmpegFileReaderWrapper : public BaseMftReader {
+ public:
+ FFmpegFileReaderWrapper() {}
+ virtual ~FFmpegFileReaderWrapper() {}
+ bool InitReader(const std::string& filename) {
+ reader_.reset(new FFmpegFileReader(filename));
+ if (!reader_.get() || !reader_->Initialize()) {
+ reader_.reset();
+ return false;
+ }
+ return true;
+ }
+ virtual void ReadCallback(scoped_refptr<DataBuffer>* input) {
+ if (reader_.get()) {
+ reader_->Read(input);
+ }
+ }
+ bool GetWidth(int* width) {
+ if (!reader_.get())
+ return false;
+ return reader_->GetWidth(width);
+ }
+ bool GetHeight(int* height) {
+ if (!reader_.get())
+ return false;
+ return reader_->GetHeight(height);
+ }
+ scoped_ptr<FFmpegFileReader> reader_;
+};
+
// Test fixture for MftH264Decoder. No shared state: each test constructs
// its own message loop, handler and decoder.
class MftH264DecoderTest : public testing::Test {
 public:
  MftH264DecoderTest() {}
  virtual ~MftH264DecoderTest() {}

 protected:
  virtual void SetUp() {}
  virtual void TearDown() {}
};
+
// Minimal EventHandler that counts how many times each decoder callback
// fires; tests inspect the counters to verify the callback protocol.
class SimpleMftH264DecoderHandler : public VideoDecodeEngine::EventHandler {
 public:
  SimpleMftH264DecoderHandler()
      : init_count_(0),
        uninit_count_(0),
        flush_count_(0),
        format_change_count_(0),
        empty_buffer_callback_count_(0),
        fill_buffer_callback_count_(0) {
    memset(&info_, 0, sizeof(info_));
  }
  virtual ~SimpleMftH264DecoderHandler() {}
  virtual void OnInitializeComplete(const VideoCodecInfo& info) {
    info_ = info;
    init_count_++;
  }
  virtual void OnUninitializeComplete() {
    uninit_count_++;
  }
  virtual void OnFlushComplete() {
    flush_count_++;
  }
  virtual void OnSeekComplete() {}
  virtual void OnError() {}
  virtual void OnFormatChange(VideoStreamInfo stream_info) {
    format_change_count_++;
    info_.stream_info = stream_info;
  }
  // Decoder pull path: feeds the next buffer from the configured reader
  // into the decoder.
  virtual void ProduceVideoSample(scoped_refptr<Buffer> buffer) {
    if (reader_.get() && decoder_) {
      empty_buffer_callback_count_++;
      scoped_refptr<DataBuffer> input;
      reader_->ReadCallback(&input);
      decoder_->ConsumeVideoSample(input);
    }
  }
  // Records the most recent decoded frame for inspection by the test.
  virtual void ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
    fill_buffer_callback_count_++;
    current_frame_ = frame;
  }
  void SetReader(scoped_refptr<BaseMftReader> reader) {
    reader_ = reader;
  }
  void SetDecoder(MftH264Decoder* decoder) {
    decoder_ = decoder;
  }

  int init_count_;
  int uninit_count_;
  int flush_count_;
  int format_change_count_;
  int empty_buffer_callback_count_;
  int fill_buffer_callback_count_;
  VideoCodecInfo info_;
  scoped_refptr<BaseMftReader> reader_;
  // Raw pointer; lifetime is managed by the test.
  MftH264Decoder* decoder_;
  scoped_refptr<VideoFrame> current_frame_;
};
+
// A simple test case for init/deinit of MF/COM libraries.
// Startup must succeed on its own (no decoder instance required).
TEST_F(MftH264DecoderTest, LibraryInit) {
  EXPECT_TRUE(MftH264Decoder::StartupComLibraries());
  MftH264Decoder::ShutdownComLibraries();
}
+
// A freshly constructed decoder must start in the kUninitialized state.
TEST_F(MftH264DecoderTest, DecoderUninitializedAtFirst) {
  scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(true, NULL));
  ASSERT_TRUE(decoder.get());
  EXPECT_EQ(MftH264Decoder::kUninitialized, decoder->state());
}
+
// Initialize() with a NULL message loop and event handler must be rejected:
// the decoder stays uninitialized.
TEST_F(MftH264DecoderTest, DecoderInitMissingArgs) {
  VideoCodecConfig config;
  config.width = 800;
  config.height = 600;
  scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
  ASSERT_TRUE(decoder.get());
  decoder->Initialize(NULL, NULL, NULL, config);
  EXPECT_EQ(MftH264Decoder::kUninitialized, decoder->state());
}
+
// Software (non-DXVA) initialization needs no window: it must fire exactly
// one OnInitializeComplete and move the decoder to kNormal.
TEST_F(MftH264DecoderTest, DecoderInitNoDxva) {
  MessageLoop loop;
  SimpleMftH264DecoderHandler handler;
  VideoCodecConfig config;
  config.width = 800;
  config.height = 600;
  scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
  ASSERT_TRUE(decoder.get());
  decoder->Initialize(&loop, &handler, NULL, config);
  EXPECT_EQ(1, handler.init_count_);
  EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
  decoder->Uninitialize();
}
+
// DXVA initialization requires a real window (for the D3D device); with one
// supplied, init must succeed just like the software path.
TEST_F(MftH264DecoderTest, DecoderInitDxva) {
  MessageLoop loop;
  SimpleMftH264DecoderHandler handler;
  VideoCodecConfig config;
  config.width = 800;
  config.height = 600;
  HWND hwnd = CreateDrawWindow(config.width, config.height);
  ASSERT_TRUE(hwnd);
  scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(true, hwnd));
  ASSERT_TRUE(decoder.get());
  decoder->Initialize(&loop, &handler, NULL, config);
  EXPECT_EQ(1, handler.init_count_);
  EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
  decoder->Uninitialize();
  DestroyWindow(hwnd);
}
+
+TEST_F(MftH264DecoderTest, DecoderUninit) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width = 800;
+ config.height = 600;
+ scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, NULL, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ decoder->Uninitialize();
+ EXPECT_EQ(1, handler.uninit_count_);
+ EXPECT_EQ(MftH264Decoder::kUninitialized, decoder->state());
+}
+
+TEST_F(MftH264DecoderTest, UninitBeforeInit) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width = 800;
+ config.height = 600;
+ scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
+ ASSERT_TRUE(decoder.get());
+ decoder->Uninitialize();
+ EXPECT_EQ(0, handler.uninit_count_);
+}
+
+TEST_F(MftH264DecoderTest, InitWithNegativeDimensions) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width = -123;
+ config.height = -456;
+ scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, NULL, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info.surface_width);
+ EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info.surface_height);
+ decoder->Uninitialize();
+}
+
+TEST_F(MftH264DecoderTest, InitWithTooHighDimensions) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width = kDecoderMaxWidth + 1;
+ config.height = kDecoderMaxHeight + 1;
+ scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, NULL, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info.surface_width);
+ EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info.surface_height);
+ decoder->Uninitialize();
+}
+
+TEST_F(MftH264DecoderTest, DrainOnEmptyBuffer) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width = 1024;
+ config.height = 768;
+ scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, NULL, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ scoped_refptr<Buffer> buffer(new DataBuffer(0));
+
+ // Decoder should switch to drain mode because of this NULL buffer, and then
+ // switch to kStopped when it says it needs more input during drain mode.
+ decoder->ConsumeVideoSample(buffer);
+ EXPECT_EQ(MftH264Decoder::kStopped, decoder->state());
+
+ // Should have called back with one empty frame.
+ EXPECT_EQ(1, handler.fill_buffer_callback_count_);
+ ASSERT_TRUE(handler.current_frame_.get());
+ EXPECT_EQ(VideoFrame::EMPTY, handler.current_frame_->format());
+ decoder->Uninitialize();
+}
+
+TEST_F(MftH264DecoderTest, NoOutputOnGarbageInput) {
+ // 100 samples of garbage.
+ const int kNumFrames = 100;
+ scoped_refptr<FakeMftReader> reader(new FakeMftReader(kNumFrames));
+ ASSERT_TRUE(reader.get());
+
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width = 1024;
+ config.height = 768;
+ scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, NULL, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ handler.SetReader(reader);
+ handler.SetDecoder(decoder.get());
+ while (MftH264Decoder::kStopped != decoder->state()) {
+ scoped_refptr<VideoFrame> frame;
+ decoder->ProduceVideoFrame(frame);
+ }
+
+ // Output callback should only be invoked once - the empty frame to indicate
+ // end of stream.
+ EXPECT_EQ(1, handler.fill_buffer_callback_count_);
+ ASSERT_TRUE(handler.current_frame_.get());
+ EXPECT_EQ(VideoFrame::EMPTY, handler.current_frame_->format());
+
+ // One extra count because of the end of stream NULL sample.
+ EXPECT_EQ(kNumFrames, handler.empty_buffer_callback_count_ - 1);
+ decoder->Uninitialize();
+}
+
+TEST_F(MftH264DecoderTest, FlushAtStart) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width = 1024;
+ config.height = 768;
+ scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, NULL, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ decoder->Flush();
+
+ // Flush should succeed even if input/output are empty.
+ EXPECT_EQ(1, handler.flush_count_);
+ decoder->Uninitialize();
+}
+
+TEST_F(MftH264DecoderTest, NoFlushAtStopped) {
+ scoped_refptr<BaseMftReader> reader(new FakeMftReader());
+ ASSERT_TRUE(reader.get());
+
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width = 1024;
+ config.height = 768;
+ scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, NULL, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ handler.SetReader(reader);
+ handler.SetDecoder(decoder.get());
+ while (MftH264Decoder::kStopped != decoder->state()) {
+ scoped_refptr<VideoFrame> frame;
+ decoder->ProduceVideoFrame(frame);
+ }
+ EXPECT_EQ(0, handler.flush_count_);
+ int old_flush_count = handler.flush_count_;
+ decoder->Flush();
+ EXPECT_EQ(old_flush_count, handler.flush_count_);
+ decoder->Uninitialize();
+}
+
+FilePath GetVideoFilePath(const std::string& file_name) {
+ FilePath path;
+ PathService::Get(base::DIR_SOURCE_ROOT, &path);
+ path = path.AppendASCII("media")
+ .AppendASCII("test")
+ .AppendASCII("data")
+ .AppendASCII(file_name.c_str());
+ return path;
+}
+
+void DecodeValidVideo(const std::string& filename, int num_frames, bool dxva) {
+ scoped_refptr<FFmpegFileReaderWrapper> reader(new FFmpegFileReaderWrapper());
+ ASSERT_TRUE(reader.get());
+ FilePath path = GetVideoFilePath(filename);
+ ASSERT_TRUE(file_util::PathExists(path));
+ ASSERT_TRUE(reader->InitReader(WideToASCII(path.value())));
+ int actual_width;
+ int actual_height;
+ ASSERT_TRUE(reader->GetWidth(&actual_width));
+ ASSERT_TRUE(reader->GetHeight(&actual_height));
+
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width = 1;
+ config.height = 1;
+ HWND hwnd = CreateDrawWindow(config.width, config.height);
+ ASSERT_TRUE(hwnd);
+ scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(dxva, hwnd));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, NULL, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ handler.SetReader(reader);
+ handler.SetDecoder(decoder.get());
+ while (MftH264Decoder::kStopped != decoder->state()) {
+ scoped_refptr<VideoFrame> frame;
+ decoder->ProduceVideoFrame(frame);
+ }
+
+ // We expect a format change when decoder receives enough data to determine
+ // the actual frame width/height.
+ EXPECT_GT(handler.format_change_count_, 0);
+ EXPECT_EQ(actual_width, handler.info_.stream_info.surface_width);
+ EXPECT_EQ(actual_height, handler.info_.stream_info.surface_height);
+ EXPECT_GE(handler.empty_buffer_callback_count_, num_frames);
+ EXPECT_EQ(num_frames, handler.fill_buffer_callback_count_ - 1);
+ decoder->Uninitialize();
+ DestroyWindow(hwnd);
+}
+
+TEST_F(MftH264DecoderTest, DecodeValidVideoDxva) {
+ DecodeValidVideo("bear.1280x720.mp4", 82, true);
+}
+
+TEST_F(MftH264DecoderTest, DecodeValidVideoNoDxva) {
+ DecodeValidVideo("bear.1280x720.mp4", 82, false);
+}
+
+} // namespace media
diff --git a/media/mf/test/run_all_unittests.cc b/media/mf/test/run_all_unittests.cc
new file mode 100644
index 0000000..4126108
--- /dev/null
+++ b/media/mf/test/run_all_unittests.cc
@@ -0,0 +1,26 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_path.h"
+#include "base/test/test_suite.h"
+#include "media/base/media.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/ffmpeg/file_protocol.h"
+
+static bool InitFFmpeg() {
+ if (!media::InitializeMediaLibrary(FilePath()))
+ return false;
+ avcodec_init();
+ av_register_all();
+ av_register_protocol2(&kFFmpegFileProtocol, sizeof(kFFmpegFileProtocol));
+ return true;
+}
+
+int main(int argc, char** argv) {
+ if (!InitFFmpeg()) {
+ fprintf(stderr, "Failed to init ffmpeg\n");
+ return -1;
+ }
+ return TestSuite(argc, argv).Run();
+}
diff --git a/media/video/mft_h264_decode_engine_context.cc b/media/video/mft_h264_decode_engine_context.cc
new file mode 100644
index 0000000..1759ced
--- /dev/null
+++ b/media/video/mft_h264_decode_engine_context.cc
@@ -0,0 +1,179 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/mft_h264_decode_engine_context.h"
+
+#include <algorithm>
+#include <vector>
+
+#include <d3d9.h>
+
+#include "base/task.h"
+#include "media/base/callback.h"
+
+#pragma comment(lib, "dxva2.lib")
+#pragma comment(lib, "d3d9.lib")
+
+using base::TimeDelta;
+
+namespace media {
+
+static D3DFORMAT VideoFrameToD3DFormat(VideoFrame::Format format) {
+ switch (format) {
+ case VideoFrame::RGB555:
+ return D3DFMT_X1R5G5B5;
+ case VideoFrame::RGB565:
+ return D3DFMT_R5G6B5;
+ case VideoFrame::RGB32:
+ return D3DFMT_X8R8G8B8;
+ case VideoFrame::RGBA:
+ return D3DFMT_A8R8G8B8;
+ default:
+ // Note that although there is a corresponding type for VideoFrame::RGB24
+ // (D3DFMT_R8G8B8), it is not supported by render targets.
+ NOTREACHED() << "Unsupported format";
+ return D3DFMT_UNKNOWN;
+ }
+}
+
+static IDirect3DTexture9* GetTexture(scoped_refptr<VideoFrame> frame) {
+ return static_cast<IDirect3DTexture9*>(frame->d3d_texture(0));
+}
+
+static void ReleaseTexture(scoped_refptr<VideoFrame> frame) {
+ GetTexture(frame)->Release();
+}
+
+static void ReleaseTextures(
+ const std::vector<scoped_refptr<VideoFrame> >& frames) {
+ std::for_each(frames.begin(), frames.end(), ReleaseTexture);
+}
+
+MftH264DecodeEngineContext::MftH264DecodeEngineContext(HWND device_window)
+ : initialized_(false),
+ device_window_(device_window),
+ d3d9_(NULL),
+ device_(NULL) {
+ DCHECK(device_window);
+}
+
+MftH264DecodeEngineContext::~MftH264DecodeEngineContext() {
+}
+
+// TODO(imcheng): This should set the success variable once the API is
+// finalized.
+void MftH264DecodeEngineContext::Initialize(Task* task) {
+ AutoTaskRunner runner(task);
+ if (initialized_)
+ return;
+ d3d9_ = Direct3DCreate9(D3D_SDK_VERSION);
+ if (!d3d9_) {
+ LOG(ERROR) << "Direct3DCreate9 failed";
+ return;
+ }
+
+ D3DPRESENT_PARAMETERS present_params = {0};
+ present_params.BackBufferWidth = 0;
+ present_params.BackBufferHeight = 0;
+ present_params.BackBufferFormat = D3DFMT_UNKNOWN;
+ present_params.BackBufferCount = 1;
+ present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
+ present_params.hDeviceWindow = device_window_;
+ present_params.Windowed = TRUE;
+ present_params.Flags = D3DPRESENTFLAG_VIDEO;
+ present_params.FullScreen_RefreshRateInHz = 0;
+ present_params.PresentationInterval = 0;
+
+ HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,
+ D3DDEVTYPE_HAL,
+ device_window_,
+ (D3DCREATE_HARDWARE_VERTEXPROCESSING |
+ D3DCREATE_MULTITHREADED),
+ &present_params,
+ device_.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "CreateDevice failed " << std::hex << hr;
+ return;
+ }
+ initialized_ = true;
+}
+
+void* MftH264DecodeEngineContext::GetDevice() {
+ return device_.get();
+}
+
+void MftH264DecodeEngineContext::AllocateVideoFrames(
+ int n, size_t width, size_t height, VideoFrame::Format format,
+ std::vector<scoped_refptr<VideoFrame> >* frames,
+ Task* task) {
+ DCHECK(initialized_);
+ DCHECK_GT(n, 0);
+ DCHECK(frames);
+
+ AutoTaskRunner runner(task);
+ D3DFORMAT d3d_format = VideoFrameToD3DFormat(format);
+ std::vector<scoped_refptr<VideoFrame> > temp_frames;
+ temp_frames.reserve(n);
+ HRESULT hr;
+ for (int i = 0; i < n; i++) {
+ IDirect3DTexture9* texture = NULL;
+ hr = device_->CreateTexture(width, height, 1, D3DUSAGE_RENDERTARGET,
+ d3d_format, D3DPOOL_DEFAULT, &texture, NULL);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "CreateTexture " << i << " failed " << std::hex << hr;
+ ReleaseTextures(temp_frames);
+ return;
+ }
+ VideoFrame::D3dTexture texture_array[VideoFrame::kMaxPlanes] =
+ { texture, texture, texture };
+ scoped_refptr<VideoFrame> texture_frame;
+ VideoFrame::CreateFrameD3dTexture(format, width, height, texture_array,
+ TimeDelta(), TimeDelta(), &texture_frame);
+ if (!texture_frame.get()) {
+ LOG(ERROR) << "CreateFrameD3dTexture " << i << " failed";
+ texture->Release();
+ ReleaseTextures(temp_frames);
+ return;
+ }
+ temp_frames.push_back(texture_frame);
+ }
+ frames->assign(temp_frames.begin(), temp_frames.end());
+ managed_frames_.insert(managed_frames_.end(),
+ temp_frames.begin(), temp_frames.end());
+}
+
+bool MftH264DecodeEngineContext::UploadToVideoFrame(
+ void* source, scoped_refptr<VideoFrame> frame) {
+ DCHECK(initialized_);
+ DCHECK(source);
+ DCHECK(frame.get());
+
+ IDirect3DSurface9* surface = static_cast<IDirect3DSurface9*>(source);
+ IDirect3DTexture9* texture = GetTexture(frame);
+ ScopedComPtr<IDirect3DSurface9> top_surface;
+ HRESULT hr;
+ hr = texture->GetSurfaceLevel(0, top_surface.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "GetSurfaceLevel failed " << std::hex << hr;
+ return false;
+ }
+ hr = device_->StretchRect(surface, NULL, top_surface.get(), NULL,
+ D3DTEXF_NONE);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "StretchRect failed " << std::hex << hr;
+ return false;
+ }
+ return true;
+}
+
+void MftH264DecodeEngineContext::ReleaseAllVideoFrames() {
+ ReleaseTextures(managed_frames_);
+ managed_frames_.clear();
+}
+
+void MftH264DecodeEngineContext::Destroy(Task* task) {
+ AutoTaskRunner runner(task);
+}
+
+} // namespace media
diff --git a/media/video/mft_h264_decode_engine_context.h b/media/video/mft_h264_decode_engine_context.h
new file mode 100644
index 0000000..d33f06c
--- /dev/null
+++ b/media/video/mft_h264_decode_engine_context.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Video decode context for MftH264DecodeEngine. This context manages
+// VideoFrame objects for the DXVA-enabled MFT H.264 decode engine, and
+// converts its output (which is IDirect3DSurface9) into IDirect3DTexture9
+// (wrapped in a VideoFrame object), which will be compatible with ANGLE.
+
+#ifndef MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_CONTEXT_H_
+#define MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_CONTEXT_H_
+
+#include <vector>
+
+#include "base/scoped_comptr_win.h"
+#include "media/base/video_frame.h"
+#include "media/video/video_decode_context.h"
+
+class Task;
+
+struct IDirect3D9;
+extern "C" const GUID IID_IDirect3D9;
+struct IDirect3DDevice9;
+extern "C" const GUID IID_IDirect3DDevice9;
+
+namespace media {
+
+// TODO(imcheng): Make it implement VideoDecodeContext once the API
+// is finalized.
+class MftH264DecodeEngineContext {
+ public:
+ // Constructs a MftH264DecodeEngineContext with the D3D device attached
+ // to |device_window|. This device does not own the window, so the caller
+ // must destroy the window explicitly after the destruction of this object.
+ explicit MftH264DecodeEngineContext(HWND device_window);
+ virtual ~MftH264DecodeEngineContext();
+
+ // TODO(imcheng): Is this a part of the API?
+ virtual void Initialize(Task* task);
+
+ // Gets the underlying IDirect3DDevice9.
+ virtual void* GetDevice();
+
+ // Allocates IDirect3DTexture9 objects wrapped in VideoFrame objects.
+ virtual void AllocateVideoFrames(
+ int n, size_t width, size_t height, VideoFrame::Format format,
+ std::vector<scoped_refptr<VideoFrame> >* frames,
+ Task* task);
+
+ // TODO(imcheng): Make this follow the API once it is finalized.
+ // Uploads the decoded frame (IDirect3DSurface9) to a VideoFrame allocated
+ // by AllocateVideoFrames().
+ virtual bool UploadToVideoFrame(void* source,
+ scoped_refptr<VideoFrame> frame);
+ virtual void ReleaseAllVideoFrames();
+ virtual void Destroy(Task* task);
+
+ bool initialized() const { return initialized_; }
+
+ private:
+ bool initialized_;
+ HWND device_window_;
+ std::vector<scoped_refptr<VideoFrame> > managed_frames_;
+ ScopedComPtr<IDirect3D9, &IID_IDirect3D9> d3d9_;
+ ScopedComPtr<IDirect3DDevice9, &IID_IDirect3DDevice9> device_;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_CONTEXT_H_
diff --git a/media/video/mft_h264_decode_engine_unittest.cc b/media/video/mft_h264_decode_engine_unittest.cc
deleted file mode 100644
index fcf7d69..0000000
--- a/media/video/mft_h264_decode_engine_unittest.cc
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/file_path.h"
-#include "base/file_util.h"
-#include "base/message_loop.h"
-#include "base/path_service.h"
-#include "base/ref_counted.h"
-#include "base/scoped_ptr.h"
-#include "base/string_util.h"
-#include "base/time.h"
-#include "media/base/data_buffer.h"
-#include "media/base/video_frame.h"
-#include "media/tools/mft_h264_example/file_reader_util.h"
-#include "media/video/mft_h264_decode_engine.h"
-#include "media/video/mft_h264_decode_engine_context.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::TimeDelta;
-
-namespace media {
-
-static const int kDecoderMaxWidth = 1920;
-static const int kDecoderMaxHeight = 1088;
-
-// Helper classes
-
-class BaseMftReader : public base::RefCountedThreadSafe<BaseMftReader> {
- public:
- virtual ~BaseMftReader() {}
- virtual void ReadCallback(scoped_refptr<DataBuffer>* input) = 0;
-};
-
-class FakeMftReader : public BaseMftReader {
- public:
- FakeMftReader() : frames_remaining_(20) {}
- explicit FakeMftReader(int count) : frames_remaining_(count) {}
- virtual ~FakeMftReader() {}
-
- // Provides garbage input to the decoder.
- virtual void ReadCallback(scoped_refptr<DataBuffer>* input) {
- if (frames_remaining_ > 0) {
- int sz = 4096;
- uint8* buf = new uint8[sz];
- memset(buf, 42, sz);
- *input = new DataBuffer(buf, sz);
- (*input)->SetDuration(base::TimeDelta::FromMicroseconds(5000));
- (*input)->SetTimestamp(
- base::TimeDelta::FromMicroseconds(
- 50000000 - frames_remaining_ * 10000));
- --frames_remaining_;
- } else {
- // Emulate end of stream on the last "frame".
- *input = new DataBuffer(0);
- }
- }
- int frames_remaining() const { return frames_remaining_; }
-
- private:
- int frames_remaining_;
-};
-
-class SimpleMftH264DecodeEngineHandler
- : public VideoDecodeEngine::EventHandler {
- public:
- SimpleMftH264DecodeEngineHandler()
- : init_count_(0),
- uninit_count_(0),
- flush_count_(0),
- format_change_count_(0),
- empty_buffer_callback_count_(0),
- fill_buffer_callback_count_(0) {
- memset(&info_, 0, sizeof(info_));
- }
- virtual ~SimpleMftH264DecodeEngineHandler() {}
- virtual void OnInitializeComplete(const VideoCodecInfo& info) {
- info_ = info;
- init_count_++;
- }
- virtual void OnUninitializeComplete() {
- uninit_count_++;
- }
- virtual void OnFlushComplete() {
- flush_count_++;
- }
- virtual void OnSeekComplete() {}
- virtual void OnError() {}
- virtual void OnFormatChange(VideoStreamInfo stream_info) {
- format_change_count_++;
- info_.stream_info = stream_info;
- }
- virtual void ProduceVideoSample(scoped_refptr<Buffer> buffer) {
- if (reader_.get() && decoder_) {
- empty_buffer_callback_count_++;
- scoped_refptr<DataBuffer> input;
- reader_->ReadCallback(&input);
- decoder_->ConsumeVideoSample(input);
- }
- }
- virtual void ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
- fill_buffer_callback_count_++;
- current_frame_ = frame;
- }
- void SetReader(scoped_refptr<BaseMftReader> reader) {
- reader_ = reader;
- }
- void SetDecodeEngine(MftH264DecodeEngine* decoder) {
- decoder_ = decoder;
- }
-
- int init_count_;
- int uninit_count_;
- int flush_count_;
- int format_change_count_;
- int empty_buffer_callback_count_;
- int fill_buffer_callback_count_;
- VideoCodecInfo info_;
- scoped_refptr<BaseMftReader> reader_;
- MftH264DecodeEngine* decoder_;
- scoped_refptr<VideoFrame> current_frame_;
-};
-
-class FFmpegFileReaderWrapper : public BaseMftReader {
- public:
- FFmpegFileReaderWrapper() {}
- virtual ~FFmpegFileReaderWrapper() {}
- bool InitReader(const std::string& filename) {
- reader_.reset(new FFmpegFileReader(filename));
- if (!reader_.get() || !reader_->Initialize()) {
- reader_.reset();
- return false;
- }
- return true;
- }
- virtual void ReadCallback(scoped_refptr<DataBuffer>* input) {
- if (reader_.get()) {
- reader_->Read(input);
- }
- }
- bool GetWidth(int* width) {
- if (!reader_.get())
- return false;
- return reader_->GetWidth(width);
- }
- bool GetHeight(int* height) {
- if (!reader_.get())
- return false;
- return reader_->GetHeight(height);
- }
- scoped_ptr<FFmpegFileReader> reader_;
-};
-
-// Helper functions
-
-static FilePath GetVideoFilePath(const std::string& file_name) {
- FilePath path;
- PathService::Get(base::DIR_SOURCE_ROOT, &path);
- path = path.AppendASCII("media")
- .AppendASCII("test")
- .AppendASCII("data")
- .AppendASCII(file_name.c_str());
- return path;
-}
-
-class MftH264DecodeEngineTest : public testing::Test {
- protected:
- MftH264DecodeEngineTest()
- : loop_(),
- window_(NULL),
- handler_(NULL),
- engine_(NULL),
- context_(NULL) {
- }
- virtual ~MftH264DecodeEngineTest() {}
- virtual void SetUp() {
- handler_.reset(new SimpleMftH264DecodeEngineHandler());
- }
- virtual void TearDown() {
- if (context_.get()) {
- context_->ReleaseAllVideoFrames();
- context_->Destroy(NULL);
- }
- if (window_)
- DestroyWindow(window_);
- }
- void GetDecodeEngine(bool dxva) {
- if (dxva) {
- if (!window_)
- CreateDrawWindow();
- context_.reset(new MftH264DecodeEngineContext(window_));
- ASSERT_TRUE(context_.get());
- context_->Initialize(NULL);
- ASSERT_TRUE(context_->initialized());
- }
- engine_.reset(new MftH264DecodeEngine(dxva));
- ASSERT_TRUE(engine_.get());
- }
- void InitDecodeEngine(int width, int height) {
- VideoCodecConfig config;
- config.width = width;
- config.height = height;
-
- // Note that although |config| is passed as reference, |config| is copied
- // into the decode engine, so it is okay to make |config| a local variable.
- engine_->Initialize(&loop_, handler_.get(), context_.get(), config);
- EXPECT_EQ(1, handler_->init_count_);
- EXPECT_EQ(MftH264DecodeEngine::kNormal, engine_->state());
- }
- void InitDecodeEngine() {
- InitDecodeEngine(800, 600);
- }
- void TestInitAndUninit(bool dxva) {
- GetDecodeEngine(dxva);
- InitDecodeEngine();
- engine_->Uninitialize();
- }
- void DecodeAll(scoped_refptr<BaseMftReader> reader) {
- handler_->SetReader(reader);
- handler_->SetDecodeEngine(engine_.get());
- while (MftH264DecodeEngine::kStopped != engine_->state()) {
- scoped_refptr<VideoFrame> frame;
- engine_->ProduceVideoFrame(frame);
- }
- }
- void DecodeValidVideo(const std::string& filename, int num_frames,
- bool dxva) {
- scoped_refptr<FFmpegFileReaderWrapper> reader(
- new FFmpegFileReaderWrapper());
- ASSERT_TRUE(reader.get());
- FilePath path = GetVideoFilePath(filename);
- ASSERT_TRUE(file_util::PathExists(path));
- ASSERT_TRUE(reader->InitReader(WideToASCII(path.value())));
- int actual_width;
- int actual_height;
- ASSERT_TRUE(reader->GetWidth(&actual_width));
- ASSERT_TRUE(reader->GetHeight(&actual_height));
-
- VideoCodecConfig config;
- CreateDrawWindow(config.width, config.height);
- GetDecodeEngine(dxva);
- InitDecodeEngine();
- DecodeAll(reader);
-
- // We expect a format change when decoder receives enough data to determine
- // the actual frame width/height.
- EXPECT_GT(handler_->format_change_count_, 0);
- EXPECT_EQ(actual_width, handler_->info_.stream_info.surface_width);
- EXPECT_EQ(actual_height, handler_->info_.stream_info.surface_height);
- EXPECT_GE(handler_->empty_buffer_callback_count_, num_frames);
- EXPECT_EQ(num_frames, handler_->fill_buffer_callback_count_ - 1);
- engine_->Uninitialize();
- }
- void ExpectDefaultDimensionsOnInput(int width, int height) {
- GetDecodeEngine(false);
- InitDecodeEngine(width, height);
- EXPECT_EQ(kDecoderMaxWidth, handler_->info_.stream_info.surface_width);
- EXPECT_EQ(kDecoderMaxHeight, handler_->info_.stream_info.surface_height);
- engine_->Uninitialize();
- }
-
- scoped_ptr<SimpleMftH264DecodeEngineHandler> handler_;
- scoped_ptr<MftH264DecodeEngine> engine_;
- scoped_ptr<MftH264DecodeEngineContext> context_;
-
- private:
- void CreateDrawWindow(int width, int height) {
- static const wchar_t kClassName[] = L"Test";
- static const wchar_t kWindowTitle[] = L"MFT Unittest Draw Window";
- WNDCLASS window_class = {0};
- window_class.lpszClassName = kClassName;
- window_class.hInstance = NULL;
- window_class.hbrBackground = 0;
- window_class.lpfnWndProc = DefWindowProc;
- window_class.hCursor = 0;
- RegisterClass(&window_class);
- window_ = CreateWindow(kClassName,
- kWindowTitle,
- (WS_OVERLAPPEDWINDOW | WS_VISIBLE) &
- ~(WS_MAXIMIZEBOX | WS_THICKFRAME),
- 100, 100, width, height,
- NULL, NULL, NULL, NULL);
- ASSERT_TRUE(window_);
- }
- void CreateDrawWindow() {
- CreateDrawWindow(800, 600);
- }
-
- MessageLoop loop_;
- HWND window_;
-};
-
-// A simple test case for init/deinit of MF/COM libraries.
-TEST_F(MftH264DecodeEngineTest, LibraryInit) {
- EXPECT_TRUE(MftH264DecodeEngine::StartupComLibraries());
- MftH264DecodeEngine::ShutdownComLibraries();
-}
-
-TEST_F(MftH264DecodeEngineTest, DecoderUninitializedAtFirst) {
- GetDecodeEngine(true);
- EXPECT_EQ(MftH264DecodeEngine::kUninitialized, engine_->state());
-}
-
-TEST_F(MftH264DecodeEngineTest, DecoderInitMissingArgs) {
- VideoCodecConfig config;
- GetDecodeEngine(false);
- engine_->Initialize(NULL, NULL, NULL, config);
- EXPECT_EQ(MftH264DecodeEngine::kUninitialized, engine_->state());
-}
-
-TEST_F(MftH264DecodeEngineTest, DecoderInitNoDxva) {
- TestInitAndUninit(false);
-}
-
-TEST_F(MftH264DecodeEngineTest, DecoderInitDxva) {
- TestInitAndUninit(true);
-}
-
-TEST_F(MftH264DecodeEngineTest, DecoderUninit) {
- TestInitAndUninit(false);
- EXPECT_EQ(1, handler_->uninit_count_);
- EXPECT_EQ(MftH264DecodeEngine::kUninitialized, engine_->state());
-}
-
-TEST_F(MftH264DecodeEngineTest, UninitBeforeInit) {
- GetDecodeEngine(false);
- engine_->Uninitialize();
- EXPECT_EQ(0, handler_->uninit_count_);
-}
-
-TEST_F(MftH264DecodeEngineTest, InitWithNegativeDimensions) {
- ExpectDefaultDimensionsOnInput(-123, -456);
-}
-
-TEST_F(MftH264DecodeEngineTest, InitWithTooHighDimensions) {
- ExpectDefaultDimensionsOnInput(kDecoderMaxWidth + 1, kDecoderMaxHeight + 1);
-}
-
-TEST_F(MftH264DecodeEngineTest, DrainOnEmptyBuffer) {
- GetDecodeEngine(false);
- InitDecodeEngine();
-
- // Decoder should switch to drain mode because of this NULL buffer, and then
- // switch to kStopped when it says it needs more input during drain mode.
- scoped_refptr<Buffer> buffer(new DataBuffer(0));
- engine_->ConsumeVideoSample(buffer);
- EXPECT_EQ(MftH264DecodeEngine::kStopped, engine_->state());
-
- // Should have called back with one empty frame.
- EXPECT_EQ(1, handler_->fill_buffer_callback_count_);
- ASSERT_TRUE(handler_->current_frame_.get());
- EXPECT_EQ(VideoFrame::EMPTY, handler_->current_frame_->format());
- engine_->Uninitialize();
-}
-
-TEST_F(MftH264DecodeEngineTest, NoOutputOnGarbageInput) {
- // 100 samples of garbage.
- const int kNumFrames = 100;
- scoped_refptr<FakeMftReader> reader(new FakeMftReader(kNumFrames));
- ASSERT_TRUE(reader.get());
-
- GetDecodeEngine(false);
- InitDecodeEngine();
- DecodeAll(reader);
-
- // Output callback should only be invoked once - the empty frame to indicate
- // end of stream.
- EXPECT_EQ(1, handler_->fill_buffer_callback_count_);
- ASSERT_TRUE(handler_->current_frame_.get());
- EXPECT_EQ(VideoFrame::EMPTY, handler_->current_frame_->format());
-
- // One extra count because of the end of stream NULL sample.
- EXPECT_EQ(kNumFrames, handler_->empty_buffer_callback_count_ - 1);
- engine_->Uninitialize();
-}
-
-TEST_F(MftH264DecodeEngineTest, FlushAtStart) {
- GetDecodeEngine(false);
- InitDecodeEngine();
- engine_->Flush();
-
- // Flush should succeed even if input/output are empty.
- EXPECT_EQ(1, handler_->flush_count_);
- engine_->Uninitialize();
-}
-
-TEST_F(MftH264DecodeEngineTest, NoFlushAtStopped) {
- scoped_refptr<BaseMftReader> reader(new FakeMftReader());
- ASSERT_TRUE(reader.get());
-
- GetDecodeEngine(false);
- InitDecodeEngine();
- DecodeAll(reader);
-
- EXPECT_EQ(0, handler_->flush_count_);
- int old_flush_count = handler_->flush_count_;
- engine_->Flush();
- EXPECT_EQ(old_flush_count, handler_->flush_count_);
- engine_->Uninitialize();
-}
-
-TEST_F(MftH264DecodeEngineTest, DecodeValidVideoDxva) {
- DecodeValidVideo("bear.1280x720.mp4", 82, true);
-}
-
-TEST_F(MftH264DecodeEngineTest, DecodeValidVideoNoDxva) {
- DecodeValidVideo("bear.1280x720.mp4", 82, false);
-}
-
-} // namespace media