path: root/media/video
author     hclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-10-08 07:30:18 +0000
committer  hclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-10-08 07:30:18 +0000
commit     b6e4b4f99f9b1833d6b1d11ae03da42158549670 (patch)
tree       a9f67c65ec2bd94a8165b9b40b369bc2178dc5aa /media/video
parent     9ba0d9f42d948e925dee89b9f1f4b6d061ab2dc1 (diff)
Resubmit - move MFT h264 code to media/video and connect to gpu video decoder.
TBR=scherkus
BUG=None
TEST=None

Review URL: http://codereview.chromium.org/3591020

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@61925 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media/video')
-rw-r--r--  media/video/mft_h264_decode_engine.cc            696
-rw-r--r--  media/video/mft_h264_decode_engine.h             102
-rw-r--r--  media/video/mft_h264_decode_engine_context.cc    179
-rw-r--r--  media/video/mft_h264_decode_engine_context.h      70
-rw-r--r--  media/video/mft_h264_decode_engine_unittest.cc   410
5 files changed, 1208 insertions, 249 deletions
diff --git a/media/video/mft_h264_decode_engine.cc b/media/video/mft_h264_decode_engine.cc
new file mode 100644
index 0000000..ea59ad6
--- /dev/null
+++ b/media/video/mft_h264_decode_engine.cc
@@ -0,0 +1,696 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/mft_h264_decode_engine.h"
+
+#include <d3d9.h>
+#include <dxva2api.h>
+#include <initguid.h>
+#include <mfapi.h>
+// Placed after mfapi.h to avoid linking strmiids.lib for MR_BUFFER_SERVICE.
+#include <evr.h>
+#include <mferror.h>
+#include <wmcodecdsp.h>
+
+#include "base/time.h"
+#include "base/message_loop.h"
+#include "media/base/limits.h"
+#include "media/video/video_decode_context.h"
+
+#pragma comment(lib, "dxva2.lib")
+#pragma comment(lib, "d3d9.lib")
+#pragma comment(lib, "mf.lib")
+#pragma comment(lib, "mfplat.lib")
+
+using base::TimeDelta;
+
+namespace {
+
+// Creates an empty Media Foundation sample with no buffers.
+static IMFSample* CreateEmptySample() {
+ HRESULT hr;
+ ScopedComPtr<IMFSample> sample;
+ hr = MFCreateSample(sample.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Unable to create an empty sample";
+ return NULL;
+ }
+ return sample.Detach();
+}
+
+// Creates a Media Foundation sample with one buffer of length |buffer_length|
+// on an |align|-byte boundary. |align| must be a power of 2 or 0.
+// If |align| is 0, no alignment is specified.
+static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
+ CHECK_GT(buffer_length, 0);
+ ScopedComPtr<IMFSample> sample;
+ sample.Attach(CreateEmptySample());
+ if (!sample.get())
+ return NULL;
+ ScopedComPtr<IMFMediaBuffer> buffer;
+ HRESULT hr;
+ if (align == 0) {
+    // Note that MFCreateMemoryBuffer is the same as MFCreateAlignedMemoryBuffer
+    // with an align argument of 0.
+ hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
+ } else {
+ hr = MFCreateAlignedMemoryBuffer(buffer_length,
+ align - 1,
+ buffer.Receive());
+ }
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Unable to create an empty buffer";
+ return NULL;
+ }
+ hr = sample->AddBuffer(buffer.get());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to add empty buffer to sample";
+ return NULL;
+ }
+ return sample.Detach();
+}
+
+// Creates a Media Foundation sample with one buffer containing a copy of the
+// given Annex B stream data.
+// If the duration and sample time are not known, provide 0. The times here
+// should be given in 100-ns units.
+// |min_size| specifies the minimum size of the buffer, which the decoder
+// might require for input.
+// |alignment| specifies the alignment of the buffer within the sample. If no
+// alignment is required, provide 0 or 1.
+static IMFSample* CreateInputSample(const uint8* stream, int size,
+ int64 timestamp, int64 duration,
+ int min_size, int alignment) {
+ CHECK(stream);
+ CHECK_GT(size, 0);
+ ScopedComPtr<IMFSample> sample;
+ sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
+ alignment));
+ if (!sample.get()) {
+ LOG(ERROR) << "Failed to create empty buffer for input";
+ return NULL;
+ }
+ HRESULT hr;
+ if (duration > 0) {
+ hr = sample->SetSampleDuration(duration);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set sample duration";
+ return NULL;
+ }
+ }
+ if (timestamp > 0) {
+ hr = sample->SetSampleTime(timestamp);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set sample time";
+ return NULL;
+ }
+ }
+ ScopedComPtr<IMFMediaBuffer> buffer;
+ hr = sample->GetBufferByIndex(0, buffer.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to get buffer in sample";
+ return NULL;
+ }
+ DWORD max_length, current_length;
+ uint8* destination;
+ hr = buffer->Lock(&destination, &max_length, &current_length);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to lock buffer";
+ return NULL;
+ }
+ CHECK_EQ(current_length, 0u);
+ CHECK_GE(static_cast<int>(max_length), size);
+ memcpy(destination, stream, size);
+ CHECK(SUCCEEDED(buffer->Unlock()));
+ hr = buffer->SetCurrentLength(size);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set current length to " << size;
+ return NULL;
+ }
+ LOG(INFO) << __FUNCTION__ << " wrote " << size << " bytes into input sample";
+ return sample.Detach();
+}
+
+const GUID ConvertVideoFrameFormatToGuid(media::VideoFrame::Format format) {
+ switch (format) {
+ case media::VideoFrame::NV12:
+ return MFVideoFormat_NV12;
+ case media::VideoFrame::YV12:
+ return MFVideoFormat_YV12;
+ default:
+ NOTREACHED() << "Unsupported VideoFrame format";
+ return GUID_NULL;
+ }
+}
+
+} // namespace
+
+namespace media {
+
+// public methods
+
+MftH264DecodeEngine::MftH264DecodeEngine(bool use_dxva)
+ : use_dxva_(use_dxva),
+ state_(kUninitialized),
+ event_handler_(NULL) {
+ memset(&input_stream_info_, 0, sizeof(input_stream_info_));
+ memset(&output_stream_info_, 0, sizeof(output_stream_info_));
+ memset(&config_, 0, sizeof(config_));
+ memset(&info_, 0, sizeof(info_));
+}
+
+MftH264DecodeEngine::~MftH264DecodeEngine() {
+}
+
+void MftH264DecodeEngine::Initialize(
+ MessageLoop* message_loop,
+ VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
+ const VideoCodecConfig& config) {
+ DCHECK(!use_dxva_ || context);
+ if (state_ != kUninitialized) {
+ LOG(ERROR) << "Initialize: invalid state";
+ return;
+ }
+ if (!message_loop || !event_handler) {
+ LOG(ERROR) << "MftH264DecodeEngine::Initialize: parameters cannot be NULL";
+ return;
+ }
+ context_ = context;
+ config_ = config;
+ event_handler_ = event_handler;
+ info_.provides_buffers = true;
+
+ if (use_dxva_) {
+ info_.stream_info.surface_format = VideoFrame::NV12;
+    // TODO(hclam): Need to correct this since this is not really a GL texture.
+ // We should just remove surface_type from stream_info.
+ info_.stream_info.surface_type = VideoFrame::TYPE_GL_TEXTURE;
+ } else {
+ info_.stream_info.surface_format = VideoFrame::YV12;
+ info_.stream_info.surface_type = VideoFrame::TYPE_SYSTEM_MEMORY;
+ }
+
+  // info_.stream_info.surface_width/height are initialized in InitInternal().
+ info_.success = InitInternal();
+ if (info_.success) {
+ state_ = kNormal;
+ AllocFramesFromContext();
+ } else {
+ LOG(ERROR) << "MftH264DecodeEngine::Initialize failed";
+ event_handler_->OnInitializeComplete(info_);
+ }
+}
+
+void MftH264DecodeEngine::Uninitialize() {
+ if (state_ == kUninitialized) {
+ LOG(ERROR) << "Uninitialize: invalid state";
+ return;
+ }
+
+ // TODO(hclam): Call ShutdownComLibraries only after MFT is released.
+ decode_engine_.Release();
+ ShutdownComLibraries();
+ state_ = kUninitialized;
+ event_handler_->OnUninitializeComplete();
+}
+
+void MftH264DecodeEngine::Flush() {
+ if (state_ != kNormal) {
+ LOG(ERROR) << "Flush: invalid state";
+ return;
+ }
+ state_ = kFlushing;
+ if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH)) {
+ LOG(WARNING) << "MftH264DecodeEngine::Flush failed to send message";
+ }
+ state_ = kNormal;
+ event_handler_->OnFlushComplete();
+}
+
+void MftH264DecodeEngine::Seek() {
+ if (state_ != kNormal) {
+ LOG(ERROR) << "Seek: invalid state";
+ return;
+ }
+
+  // TODO(hclam): The logic in VideoRendererBase is flawed such that we have
+  // to perform the following hack to get playback going.
+ for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) {
+ event_handler_->ConsumeVideoFrame(output_frames_[0]);
+ }
+
+ // Seek not implemented.
+ event_handler_->OnSeekComplete();
+}
+
+void MftH264DecodeEngine::ConsumeVideoSample(scoped_refptr<Buffer> buffer) {
+  if (state_ == kUninitialized) {
+    LOG(ERROR) << "ConsumeVideoSample: invalid state";
+    return;
+  }
+ ScopedComPtr<IMFSample> sample;
+ if (!buffer->IsEndOfStream()) {
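+    // IMFSample timestamps and durations are measured in 100-nanosecond
+    // units, hence the *10 conversion from microseconds below.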
+ sample.Attach(
+ CreateInputSample(buffer->GetData(),
+ buffer->GetDataSize(),
+ buffer->GetTimestamp().InMicroseconds() * 10,
+ buffer->GetDuration().InMicroseconds() * 10,
+ input_stream_info_.cbSize,
+ input_stream_info_.cbAlignment));
+ if (!sample.get()) {
+ LOG(ERROR) << "Failed to create an input sample";
+ } else {
+ if (FAILED(decode_engine_->ProcessInput(0, sample.get(), 0))) {
+ event_handler_->OnError();
+ }
+ }
+ } else {
+ if (state_ != MftH264DecodeEngine::kEosDrain) {
+ // End of stream, send drain messages.
+ if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM) ||
+ !SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN)) {
+ LOG(ERROR) << "Failed to send EOS / drain messages to MFT";
+ event_handler_->OnError();
+ } else {
+ state_ = MftH264DecodeEngine::kEosDrain;
+ }
+ }
+ }
+ DoDecode();
+}
+
+void MftH264DecodeEngine::ProduceVideoFrame(scoped_refptr<VideoFrame> frame) {
+ if (state_ == kUninitialized) {
+ LOG(ERROR) << "ProduceVideoFrame: invalid state";
+ return;
+ }
+ event_handler_->ProduceVideoSample(NULL);
+}
+
+// private methods
+
+// static
+bool MftH264DecodeEngine::StartupComLibraries() {
+ HRESULT hr;
+ hr = CoInitializeEx(NULL,
+ COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "CoInit fail";
+ return false;
+ }
+
+ hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "MFStartup fail";
+ CoUninitialize();
+ return false;
+ }
+ return true;
+}
+
+// static
+void MftH264DecodeEngine::ShutdownComLibraries() {
+ HRESULT hr;
+ hr = MFShutdown();
+ if (FAILED(hr)) {
+ LOG(WARNING) << "Warning: MF failed to shutdown";
+ }
+ CoUninitialize();
+}
+
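+// Shares the rendering Direct3D device with the MFT through an
+// IDirect3DDeviceManager9 so that the decoder can create DXVA surfaces on it.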
+bool MftH264DecodeEngine::EnableDxva() {
+ IDirect3DDevice9* device = static_cast<IDirect3DDevice9*>(
+ context_->GetDevice());
+ ScopedComPtr<IDirect3DDeviceManager9> device_manager;
+ UINT dev_manager_reset_token = 0;
+ HRESULT hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
+ device_manager.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Couldn't create D3D Device manager";
+ return false;
+ }
+
+ hr = device_manager->ResetDevice(device, dev_manager_reset_token);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to reset device";
+ return false;
+ }
+
+ hr = decode_engine_->ProcessMessage(
+ MFT_MESSAGE_SET_D3D_MANAGER,
+ reinterpret_cast<ULONG_PTR>(device_manager.get()));
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set D3D9 device manager to decoder "
+ << std::hex << hr;
+ return false;
+ }
+
+ return true;
+}
+
+bool MftH264DecodeEngine::InitInternal() {
+ if (!StartupComLibraries())
+ return false;
+ if (!InitDecodeEngine())
+ return false;
+ if (!GetStreamsInfoAndBufferReqs())
+ return false;
+ return SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING);
+}
+
+bool MftH264DecodeEngine::InitDecodeEngine() {
+  // TODO(jiesun): use MFTEnum to get the decoder CLSID.
+ HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
+ NULL,
+ CLSCTX_INPROC_SERVER,
+ __uuidof(IMFTransform),
+ reinterpret_cast<void**>(
+ decode_engine_.Receive()));
+ if (FAILED(hr) || !decode_engine_.get()) {
+ LOG(ERROR) << "CoCreateInstance failed " << std::hex << std::showbase << hr;
+ return false;
+ }
+ if (!CheckDecodeEngineDxvaSupport())
+ return false;
+ if (use_dxva_ && !EnableDxva())
+ return false;
+ return SetDecodeEngineMediaTypes();
+}
+
+void MftH264DecodeEngine::AllocFramesFromContext() {
+  if (!use_dxva_) {
+    // No context-allocated frames are needed; finish initialization directly.
+    OnAllocFramesDone();
+    return;
+  }
+
+ // TODO(imcheng): Pass in an actual task. (From EventHandler?)
+ context_->ReleaseAllVideoFrames();
+ output_frames_.clear();
+ context_->AllocateVideoFrames(
+ 1, info_.stream_info.surface_width, info_.stream_info.surface_height,
+ VideoFrame::RGBA, &output_frames_,
+ NewRunnableMethod(this, &MftH264DecodeEngine::OnAllocFramesDone));
+}
+
+void MftH264DecodeEngine::OnAllocFramesDone() {
+ event_handler_->OnInitializeComplete(info_);
+}
+
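+// Queries the MF_SA_D3D_AWARE attribute to verify that the decoder can accept
+// a D3D device manager, i.e. that it supports DXVA.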
+bool MftH264DecodeEngine::CheckDecodeEngineDxvaSupport() {
+ ScopedComPtr<IMFAttributes> attributes;
+ HRESULT hr = decode_engine_->GetAttributes(attributes.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Unlock: Failed to get attributes, hr = "
+ << std::hex << std::showbase << hr;
+ return false;
+ }
+
+ UINT32 dxva;
+ hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
+ if (FAILED(hr) || !dxva) {
+ LOG(ERROR) << "Failed to get DXVA attr or decoder is not DXVA-aware, hr = "
+ << std::hex << std::showbase << hr
+ << " this might not be the right decoder.";
+ return false;
+ }
+ return true;
+}
+
+bool MftH264DecodeEngine::SetDecodeEngineMediaTypes() {
+ if (!SetDecodeEngineInputMediaType())
+ return false;
+ return SetDecodeEngineOutputMediaType(
+ ConvertVideoFrameFormatToGuid(info_.stream_info.surface_format));
+}
+
+bool MftH264DecodeEngine::SetDecodeEngineInputMediaType() {
+ ScopedComPtr<IMFMediaType> media_type;
+ HRESULT hr = MFCreateMediaType(media_type.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to create empty media type object";
+ return false;
+ }
+
+ hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "SetGUID for major type failed";
+ return false;
+ }
+
+ hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "SetGUID for subtype failed";
+ return false;
+ }
+
+ hr = decode_engine_->SetInputType(0, media_type.get(), 0); // No flags
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set decoder's input type";
+ return false;
+ }
+
+ return true;
+}
+
+bool MftH264DecodeEngine::SetDecodeEngineOutputMediaType(const GUID subtype) {
+ DWORD i = 0;
+ IMFMediaType* out_media_type;
+ bool found = false;
+ while (SUCCEEDED(decode_engine_->GetOutputAvailableType(0, i,
+ &out_media_type))) {
+ GUID out_subtype;
+ HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
+    if (FAILED(hr)) {
+      LOG(ERROR) << "Failed to GetGUID() on GetOutputAvailableType() " << i;
+      out_media_type->Release();
+      i++;
+      continue;
+    }
+ if (out_subtype == subtype) {
+ hr = decode_engine_->SetOutputType(0, out_media_type, 0); // No flags
+ hr = MFGetAttributeSize(out_media_type, MF_MT_FRAME_SIZE,
+ reinterpret_cast<UINT32*>(&info_.stream_info.surface_width),
+ reinterpret_cast<UINT32*>(&info_.stream_info.surface_height));
+ config_.width = info_.stream_info.surface_width;
+ config_.height = info_.stream_info.surface_height;
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to SetOutputType to |subtype| or obtain "
+ << "width/height " << std::hex << hr;
+ } else {
+ out_media_type->Release();
+ return true;
+ }
+ }
+ i++;
+ out_media_type->Release();
+ }
+ return false;
+}
+
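+// Sends |msg| to the MFT. The messages used here (begin streaming, flush,
+// end-of-stream, drain) take no parameter, so NULL is passed.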
+bool MftH264DecodeEngine::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
+ HRESULT hr = decode_engine_->ProcessMessage(msg, NULL);
+ return SUCCEEDED(hr);
+}
+
+// Prints out info about the input/output streams and gets the minimum buffer
+// sizes for input and output samples.
+// The MFT will not allocate buffers for either input or output, so we have
+// to do it ourselves and make sure they're the correct size.
+// The exception is when DXVA is enabled: the decoder allocates its own output.
+bool MftH264DecodeEngine::GetStreamsInfoAndBufferReqs() {
+ HRESULT hr = decode_engine_->GetInputStreamInfo(0, &input_stream_info_);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to get input stream info";
+ return false;
+ }
+ LOG(INFO) << "Input stream info: ";
+ LOG(INFO) << "Max latency: " << input_stream_info_.hnsMaxLatency;
+
+  // There should be three flags: one requiring that a whole frame be in a
+  // single sample, one requiring that there be only one buffer in a single
+  // sample, and one specifying a fixed sample size (as in cbSize).
+ LOG(INFO) << "Flags: "
+ << std::hex << std::showbase << input_stream_info_.dwFlags;
+ CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
+ LOG(INFO) << "Min buffer size: " << input_stream_info_.cbSize;
+ LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
+ LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment;
+
+ hr = decode_engine_->GetOutputStreamInfo(0, &output_stream_info_);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to get output stream info";
+ return false;
+ }
+ LOG(INFO) << "Output stream info: ";
+  // The flags here should be the same and mean the same thing, except that
+  // when DXVA is enabled there is an extra 0x100 flag, meaning the decoder
+  // will allocate its own samples.
+ LOG(INFO) << "Flags: "
+ << std::hex << std::showbase << output_stream_info_.dwFlags;
+ CHECK_EQ(output_stream_info_.dwFlags, use_dxva_ ? 0x107u : 0x7u);
+ LOG(INFO) << "Min buffer size: " << output_stream_info_.cbSize;
+ LOG(INFO) << "Alignment: " << output_stream_info_.cbAlignment;
+
+ return true;
+}
+
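+// Pulls one unit of output from the MFT and routes it to the event handler.
+// Returns false when decoding cannot continue (invalid state, fatal error, or
+// end of stream reached while draining).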
+bool MftH264DecodeEngine::DoDecode() {
+ if (state_ != kNormal && state_ != kEosDrain) {
+ LOG(ERROR) << "DoDecode: not in normal or drain state";
+ return false;
+ }
+ scoped_refptr<VideoFrame> frame;
+ ScopedComPtr<IMFSample> output_sample;
+ if (!use_dxva_) {
+ output_sample.Attach(
+ CreateEmptySampleWithBuffer(output_stream_info_.cbSize,
+ output_stream_info_.cbAlignment));
+ if (!output_sample.get()) {
+ LOG(ERROR) << "GetSample: failed to create empty output sample";
+ event_handler_->OnError();
+ return false;
+ }
+ }
+ MFT_OUTPUT_DATA_BUFFER output_data_buffer;
+ memset(&output_data_buffer, 0, sizeof(output_data_buffer));
+ output_data_buffer.dwStreamID = 0;
+ output_data_buffer.pSample = output_sample;
+
+ DWORD status;
+ HRESULT hr = decode_engine_->ProcessOutput(0, // No flags
+ 1, // # of out streams to pull
+ &output_data_buffer,
+ &status);
+
+ IMFCollection* events = output_data_buffer.pEvents;
+ if (events != NULL) {
+ LOG(INFO) << "Got events from ProcessOuput, but discarding";
+ events->Release();
+ }
+
+ if (FAILED(hr)) {
+ if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+ hr = SetDecodeEngineOutputMediaType(
+ ConvertVideoFrameFormatToGuid(info_.stream_info.surface_format));
+ if (SUCCEEDED(hr)) {
+ // TODO(hclam): Need to fix this case. This happens when we have a
+ // format change. We have to resume decoding only after we have
+ // allocated a new set of video frames.
+ // AllocFramesFromContext();
+ // event_handler_->OnFormatChange(info_.stream_info);
+ event_handler_->ProduceVideoSample(NULL);
+ return true;
+ } else {
+ event_handler_->OnError();
+ return false;
+ }
+ } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+ if (state_ == kEosDrain) {
+ // No more output from the decoder. Notify EOS and stop playback.
+ scoped_refptr<VideoFrame> frame;
+ VideoFrame::CreateEmptyFrame(&frame);
+ event_handler_->ConsumeVideoFrame(frame);
+ state_ = MftH264DecodeEngine::kStopped;
+ return false;
+ }
+ event_handler_->ProduceVideoSample(NULL);
+ return true;
+ } else {
+ LOG(ERROR) << "Unhandled error in DoDecode()";
+ state_ = MftH264DecodeEngine::kStopped;
+ event_handler_->OnError();
+ return false;
+ }
+ }
+
+ // We succeeded in getting an output sample.
+ if (use_dxva_) {
+ // For DXVA we didn't provide the sample, i.e. output_sample was NULL.
+ output_sample.Attach(output_data_buffer.pSample);
+ }
+ if (!output_sample.get()) {
+ LOG(ERROR) << "ProcessOutput succeeded, but did not get a sample back";
+ event_handler_->OnError();
+ return true;
+ }
+
+ int64 timestamp = 0, duration = 0;
+ if (FAILED(output_sample->GetSampleTime(&timestamp)) ||
+ FAILED(output_sample->GetSampleDuration(&duration))) {
+ LOG(WARNING) << "Failed to get timestamp/duration from output";
+ }
+
+ // The duration and timestamps are in 100-ns units, so divide by 10
+ // to convert to microseconds.
+ timestamp /= 10;
+ duration /= 10;
+
+  // Sanity-check that the sample actually contains a single buffer.
+ DWORD buf_count;
+ hr = output_sample->GetBufferCount(&buf_count);
+ if (FAILED(hr) || buf_count != 1) {
+ LOG(ERROR) << "Failed to get buffer count, or buffer count mismatch";
+ return true;
+ }
+
+ ScopedComPtr<IMFMediaBuffer> output_buffer;
+ hr = output_sample->GetBufferByIndex(0, output_buffer.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to get buffer from sample";
+ return true;
+ }
+ if (use_dxva_) {
+ ScopedComPtr<IDirect3DSurface9, &IID_IDirect3DSurface9> surface;
+ hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
+ IID_PPV_ARGS(surface.Receive()));
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to get surface from buffer";
+ return true;
+ }
+    // We only allocated one frame from the context.
+ // TODO(imcheng): Detect error.
+ output_frames_[0]->SetTimestamp(TimeDelta::FromMicroseconds(timestamp));
+ output_frames_[0]->SetDuration(TimeDelta::FromMicroseconds(duration));
+ context_->UploadToVideoFrame(
+ surface.get(), output_frames_[0],
+ NewRunnableMethod(this, &MftH264DecodeEngine::OnUploadVideoFrameDone,
+ surface, output_frames_[0]));
+ return true;
+ } else {
+ // TODO(hclam): Remove this branch.
+ // Not DXVA.
+ VideoFrame::CreateFrame(info_.stream_info.surface_format,
+ info_.stream_info.surface_width,
+ info_.stream_info.surface_height,
+ TimeDelta::FromMicroseconds(timestamp),
+ TimeDelta::FromMicroseconds(duration),
+ &frame);
+ if (!frame.get()) {
+ LOG(ERROR) << "Failed to allocate video frame for yuv plane";
+ event_handler_->OnError();
+ return true;
+ }
+ uint8* src_y;
+ DWORD max_length, current_length;
+ HRESULT hr = output_buffer->Lock(&src_y, &max_length, &current_length);
+ if (FAILED(hr))
+ return true;
+ uint8* dst_y = static_cast<uint8*>(frame->data(VideoFrame::kYPlane));
+
+ memcpy(dst_y, src_y, current_length);
+ CHECK(SUCCEEDED(output_buffer->Unlock()));
+ event_handler_->ConsumeVideoFrame(frame);
+ return true;
+ }
+}
+
+void MftH264DecodeEngine::OnUploadVideoFrameDone(
+ ScopedComPtr<IDirect3DSurface9, &IID_IDirect3DSurface9> surface,
+ scoped_refptr<media::VideoFrame> frame) {
+  // After this method exits, the reference to |surface| is released.
+ event_handler_->ConsumeVideoFrame(frame);
+}
+
+} // namespace media
+
+DISABLE_RUNNABLE_METHOD_REFCOUNT(media::MftH264DecodeEngine);
diff --git a/media/video/mft_h264_decode_engine.h b/media/video/mft_h264_decode_engine.h
new file mode 100644
index 0000000..e13dce9
--- /dev/null
+++ b/media/video/mft_h264_decode_engine.h
@@ -0,0 +1,102 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MFT H.264 decode engine.
+
+#ifndef MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_H_
+#define MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_H_
+
+// TODO(imcheng): Get rid of this header by:
+// - forward declaring IMFTransform and its IID as in
+// mft_h264_decode_engine_context.h
+// - turning the general SendMFTMessage method into specific methods
+// (SendFlushMessage, SendDrainMessage, etc.) to avoid having
+// MFT_MESSAGE_TYPE in here
+#include <mfidl.h>
+
+#include "base/gtest_prod_util.h"
+#include "base/scoped_comptr_win.h"
+#include "media/video/video_decode_engine.h"
+
+struct IDirect3DSurface9;
+extern "C" const GUID IID_IDirect3DSurface9;
+
+class MessageLoop;
+
+namespace media {
+
+class VideoDecodeContext;
+
+class MftH264DecodeEngine : public media::VideoDecodeEngine {
+ public:
+ typedef enum {
+ kUninitialized, // un-initialized.
+ kNormal, // normal playing state.
+ kFlushing, // upon received Flush(), before FlushDone()
+ kEosDrain, // upon input EOS received.
+ kStopped, // upon output EOS received.
+ } State;
+
+ explicit MftH264DecodeEngine(bool use_dxva);
+ virtual ~MftH264DecodeEngine();
+
+ // VideoDecodeEngine implementation.
+ virtual void Initialize(MessageLoop* message_loop,
+ media::VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
+ const VideoCodecConfig& config);
+ virtual void Uninitialize();
+ virtual void Flush();
+ virtual void Seek();
+ virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
+ virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
+
+ bool use_dxva() const { return use_dxva_; }
+ State state() const { return state_; }
+
+ private:
+ friend class MftH264DecodeEngineTest;
+ FRIEND_TEST_ALL_PREFIXES(MftH264DecodeEngineTest, LibraryInit);
+
+  // TODO(jiesun): Find a way to move all these to GpuVideoService.
+ static bool StartupComLibraries();
+ static void ShutdownComLibraries();
+ bool EnableDxva();
+
+ bool InitInternal();
+ bool InitDecodeEngine();
+ void AllocFramesFromContext();
+ bool CheckDecodeEngineDxvaSupport();
+ bool SetDecodeEngineMediaTypes();
+ bool SetDecodeEngineInputMediaType();
+ bool SetDecodeEngineOutputMediaType(const GUID subtype);
+ bool SendMFTMessage(MFT_MESSAGE_TYPE msg);
+ bool GetStreamsInfoAndBufferReqs();
+ bool DoDecode();
+ void OnAllocFramesDone();
+ void OnUploadVideoFrameDone(
+ ScopedComPtr<IDirect3DSurface9, &IID_IDirect3DSurface9> surface,
+ scoped_refptr<media::VideoFrame> frame);
+
+ bool use_dxva_;
+ ScopedComPtr<IMFTransform> decode_engine_;
+
+ MFT_INPUT_STREAM_INFO input_stream_info_;
+ MFT_OUTPUT_STREAM_INFO output_stream_info_;
+
+ State state_;
+
+ VideoDecodeEngine::EventHandler* event_handler_;
+ VideoCodecConfig config_;
+ VideoCodecInfo info_;
+
+ VideoDecodeContext* context_;
+ std::vector<scoped_refptr<VideoFrame> > output_frames_;
+
+ DISALLOW_COPY_AND_ASSIGN(MftH264DecodeEngine);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_H_
diff --git a/media/video/mft_h264_decode_engine_context.cc b/media/video/mft_h264_decode_engine_context.cc
deleted file mode 100644
index 1759ced..0000000
--- a/media/video/mft_h264_decode_engine_context.cc
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/video/mft_h264_decode_engine_context.h"
-
-#include <algorithm>
-#include <vector>
-
-#include <d3d9.h>
-
-#include "base/task.h"
-#include "media/base/callback.h"
-
-#pragma comment(lib, "dxva2.lib")
-#pragma comment(lib, "d3d9.lib")
-
-using base::TimeDelta;
-
-namespace media {
-
-static D3DFORMAT VideoFrameToD3DFormat(VideoFrame::Format format) {
- switch (format) {
- case VideoFrame::RGB555:
- return D3DFMT_X1R5G5B5;
- case VideoFrame::RGB565:
- return D3DFMT_R5G6B5;
- case VideoFrame::RGB32:
- return D3DFMT_X8R8G8B8;
- case VideoFrame::RGBA:
- return D3DFMT_A8R8G8B8;
- default:
- // Note that although there is a corresponding type for VideoFrame::RGB24
- // (D3DFMT_R8G8B8), it is not supported by render targets.
- NOTREACHED() << "Unsupported format";
- return D3DFMT_UNKNOWN;
- }
-}
-
-static IDirect3DTexture9* GetTexture(scoped_refptr<VideoFrame> frame) {
- return static_cast<IDirect3DTexture9*>(frame->d3d_texture(0));
-}
-
-static void ReleaseTexture(scoped_refptr<VideoFrame> frame) {
- GetTexture(frame)->Release();
-}
-
-static void ReleaseTextures(
- const std::vector<scoped_refptr<VideoFrame> >& frames) {
- std::for_each(frames.begin(), frames.end(), ReleaseTexture);
-}
-
-MftH264DecodeEngineContext::MftH264DecodeEngineContext(HWND device_window)
- : initialized_(false),
- device_window_(device_window),
- d3d9_(NULL),
- device_(NULL) {
- DCHECK(device_window);
-}
-
-MftH264DecodeEngineContext::~MftH264DecodeEngineContext() {
-}
-
-// TODO(imcheng): This should set the success variable once the API is
-// finalized.
-void MftH264DecodeEngineContext::Initialize(Task* task) {
- AutoTaskRunner runner(task);
- if (initialized_)
- return;
- d3d9_ = Direct3DCreate9(D3D_SDK_VERSION);
- if (!d3d9_) {
- LOG(ERROR) << "Direct3DCreate9 failed";
- return;
- }
-
- D3DPRESENT_PARAMETERS present_params = {0};
- present_params.BackBufferWidth = 0;
- present_params.BackBufferHeight = 0;
- present_params.BackBufferFormat = D3DFMT_UNKNOWN;
- present_params.BackBufferCount = 1;
- present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
- present_params.hDeviceWindow = device_window_;
- present_params.Windowed = TRUE;
- present_params.Flags = D3DPRESENTFLAG_VIDEO;
- present_params.FullScreen_RefreshRateInHz = 0;
- present_params.PresentationInterval = 0;
-
- HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,
- D3DDEVTYPE_HAL,
- device_window_,
- (D3DCREATE_HARDWARE_VERTEXPROCESSING |
- D3DCREATE_MULTITHREADED),
- &present_params,
- device_.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "CreateDevice failed " << std::hex << hr;
- return;
- }
- initialized_ = true;
-}
-
-void* MftH264DecodeEngineContext::GetDevice() {
- return device_.get();
-}
-
-void MftH264DecodeEngineContext::AllocateVideoFrames(
- int n, size_t width, size_t height, VideoFrame::Format format,
- std::vector<scoped_refptr<VideoFrame> >* frames,
- Task* task) {
- DCHECK(initialized_);
- DCHECK_GT(n, 0);
- DCHECK(frames);
-
- AutoTaskRunner runner(task);
- D3DFORMAT d3d_format = VideoFrameToD3DFormat(format);
- std::vector<scoped_refptr<VideoFrame> > temp_frames;
- temp_frames.reserve(n);
- HRESULT hr;
- for (int i = 0; i < n; i++) {
- IDirect3DTexture9* texture = NULL;
- hr = device_->CreateTexture(width, height, 1, D3DUSAGE_RENDERTARGET,
- d3d_format, D3DPOOL_DEFAULT, &texture, NULL);
- if (FAILED(hr)) {
- LOG(ERROR) << "CreateTexture " << i << " failed " << std::hex << hr;
- ReleaseTextures(temp_frames);
- return;
- }
- VideoFrame::D3dTexture texture_array[VideoFrame::kMaxPlanes] =
- { texture, texture, texture };
- scoped_refptr<VideoFrame> texture_frame;
- VideoFrame::CreateFrameD3dTexture(format, width, height, texture_array,
- TimeDelta(), TimeDelta(), &texture_frame);
- if (!texture_frame.get()) {
- LOG(ERROR) << "CreateFrameD3dTexture " << i << " failed";
- texture->Release();
- ReleaseTextures(temp_frames);
- return;
- }
- temp_frames.push_back(texture_frame);
- }
- frames->assign(temp_frames.begin(), temp_frames.end());
- managed_frames_.insert(managed_frames_.end(),
- temp_frames.begin(), temp_frames.end());
-}
-
-bool MftH264DecodeEngineContext::UploadToVideoFrame(
- void* source, scoped_refptr<VideoFrame> frame) {
- DCHECK(initialized_);
- DCHECK(source);
- DCHECK(frame.get());
-
- IDirect3DSurface9* surface = static_cast<IDirect3DSurface9*>(source);
- IDirect3DTexture9* texture = GetTexture(frame);
- ScopedComPtr<IDirect3DSurface9> top_surface;
- HRESULT hr;
- hr = texture->GetSurfaceLevel(0, top_surface.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "GetSurfaceLevel failed " << std::hex << hr;
- return false;
- }
- hr = device_->StretchRect(surface, NULL, top_surface.get(), NULL,
- D3DTEXF_NONE);
- if (FAILED(hr)) {
- LOG(ERROR) << "StretchRect failed " << std::hex << hr;
- return false;
- }
- return true;
-}
-
-void MftH264DecodeEngineContext::ReleaseAllVideoFrames() {
- ReleaseTextures(managed_frames_);
- managed_frames_.clear();
-}
-
-void MftH264DecodeEngineContext::Destroy(Task* task) {
- AutoTaskRunner runner(task);
-}
-
-} // namespace media
diff --git a/media/video/mft_h264_decode_engine_context.h b/media/video/mft_h264_decode_engine_context.h
deleted file mode 100644
index d33f06c..0000000
--- a/media/video/mft_h264_decode_engine_context.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Video decode context for MftH264DecodeEngine. This context manages
-// VideoFrame objects for the DXVA-enabled MFT H.264 decode engine, and
-// converts its output (which is IDirect3DSurface9) into IDirect3DTexture9
-// (wrapped in a VideoFrame object), which will be compatible with ANGLE.
-
-#ifndef MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_CONTEXT_H_
-#define MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_CONTEXT_H_
-
-#include <vector>
-
-#include "base/scoped_comptr_win.h"
-#include "media/base/video_frame.h"
-#include "media/video/video_decode_context.h"
-
-class Task;
-
-struct IDirect3D9;
-extern "C" const GUID IID_IDirect3D9;
-struct IDirect3DDevice9;
-extern "C" const GUID IID_IDirect3DDevice9;
-
-namespace media {
-
-// TODO(imcheng): Make it implement VideoDecodeContext once the API
-// is finalized.
-class MftH264DecodeEngineContext {
- public:
- // Constructs a MftH264DecodeEngineContext with the D3D device attached
- // to |device_window|. This device does not own the window, so the caller
- // must destroy the window explicitly after the destruction of this object.
- explicit MftH264DecodeEngineContext(HWND device_window);
- virtual ~MftH264DecodeEngineContext();
-
- // TODO(imcheng): Is this a part of the API?
- virtual void Initialize(Task* task);
-
- // Gets the underlying IDirect3DDevice9.
- virtual void* GetDevice();
-
- // Allocates IDirect3DTexture9 objects wrapped in VideoFrame objects.
- virtual void AllocateVideoFrames(
- int n, size_t width, size_t height, VideoFrame::Format format,
- std::vector<scoped_refptr<VideoFrame> >* frames,
- Task* task);
-
- // TODO(imcheng): Make this follow the API once it is finalized.
- // Uploads the decoded frame (IDirect3DSurface9) to a VideoFrame allocated
- // by AllocateVideoFrames().
- virtual bool UploadToVideoFrame(void* source,
- scoped_refptr<VideoFrame> frame);
- virtual void ReleaseAllVideoFrames();
- virtual void Destroy(Task* task);
-
- bool initialized() const { return initialized_; }
-
- private:
- bool initialized_;
- HWND device_window_;
- std::vector<scoped_refptr<VideoFrame> > managed_frames_;
- ScopedComPtr<IDirect3D9, &IID_IDirect3D9> d3d9_;
- ScopedComPtr<IDirect3DDevice9, &IID_IDirect3DDevice9> device_;
-};
-
-} // namespace media
-
-#endif // MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_CONTEXT_H_
diff --git a/media/video/mft_h264_decode_engine_unittest.cc b/media/video/mft_h264_decode_engine_unittest.cc
new file mode 100644
index 0000000..fcf7d69
--- /dev/null
+++ b/media/video/mft_h264_decode_engine_unittest.cc
@@ -0,0 +1,410 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_path.h"
+#include "base/file_util.h"
+#include "base/message_loop.h"
+#include "base/path_service.h"
+#include "base/ref_counted.h"
+#include "base/scoped_ptr.h"
+#include "base/string_util.h"
+#include "base/time.h"
+#include "media/base/data_buffer.h"
+#include "media/base/video_frame.h"
+#include "media/tools/mft_h264_example/file_reader_util.h"
+#include "media/video/mft_h264_decode_engine.h"
+#include "media/video/mft_h264_decode_engine_context.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeDelta;
+
+namespace media {
+
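+// Default dimensions reported by the decoder before/without valid input. The
+// height is 1088 rather than 1080 because coded H.264 dimensions are rounded
+// up to a multiple of 16 (whole macroblocks).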
+static const int kDecoderMaxWidth = 1920;
+static const int kDecoderMaxHeight = 1088;
+
+// Helper classes
+
+class BaseMftReader : public base::RefCountedThreadSafe<BaseMftReader> {
+ public:
+ virtual ~BaseMftReader() {}
+ virtual void ReadCallback(scoped_refptr<DataBuffer>* input) = 0;
+};
+
+class FakeMftReader : public BaseMftReader {
+ public:
+ FakeMftReader() : frames_remaining_(20) {}
+ explicit FakeMftReader(int count) : frames_remaining_(count) {}
+ virtual ~FakeMftReader() {}
+
+ // Provides garbage input to the decoder.
+ virtual void ReadCallback(scoped_refptr<DataBuffer>* input) {
+ if (frames_remaining_ > 0) {
+ int sz = 4096;
+ uint8* buf = new uint8[sz];
+ memset(buf, 42, sz);
+ *input = new DataBuffer(buf, sz);
+ (*input)->SetDuration(base::TimeDelta::FromMicroseconds(5000));
+ (*input)->SetTimestamp(
+ base::TimeDelta::FromMicroseconds(
+ 50000000 - frames_remaining_ * 10000));
+ --frames_remaining_;
+ } else {
+ // Emulate end of stream on the last "frame".
+ *input = new DataBuffer(0);
+ }
+ }
+ int frames_remaining() const { return frames_remaining_; }
+
+ private:
+ int frames_remaining_;
+};
+
+class SimpleMftH264DecodeEngineHandler
+ : public VideoDecodeEngine::EventHandler {
+ public:
+ SimpleMftH264DecodeEngineHandler()
+ : init_count_(0),
+ uninit_count_(0),
+ flush_count_(0),
+ format_change_count_(0),
+ empty_buffer_callback_count_(0),
+ fill_buffer_callback_count_(0) {
+ memset(&info_, 0, sizeof(info_));
+ }
+ virtual ~SimpleMftH264DecodeEngineHandler() {}
+ virtual void OnInitializeComplete(const VideoCodecInfo& info) {
+ info_ = info;
+ init_count_++;
+ }
+ virtual void OnUninitializeComplete() {
+ uninit_count_++;
+ }
+ virtual void OnFlushComplete() {
+ flush_count_++;
+ }
+ virtual void OnSeekComplete() {}
+ virtual void OnError() {}
+ virtual void OnFormatChange(VideoStreamInfo stream_info) {
+ format_change_count_++;
+ info_.stream_info = stream_info;
+ }
+ virtual void ProduceVideoSample(scoped_refptr<Buffer> buffer) {
+ if (reader_.get() && decoder_) {
+ empty_buffer_callback_count_++;
+ scoped_refptr<DataBuffer> input;
+ reader_->ReadCallback(&input);
+ decoder_->ConsumeVideoSample(input);
+ }
+ }
+ virtual void ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
+ fill_buffer_callback_count_++;
+ current_frame_ = frame;
+ }
+ void SetReader(scoped_refptr<BaseMftReader> reader) {
+ reader_ = reader;
+ }
+ void SetDecodeEngine(MftH264DecodeEngine* decoder) {
+ decoder_ = decoder;
+ }
+
+ int init_count_;
+ int uninit_count_;
+ int flush_count_;
+ int format_change_count_;
+ int empty_buffer_callback_count_;
+ int fill_buffer_callback_count_;
+ VideoCodecInfo info_;
+ scoped_refptr<BaseMftReader> reader_;
+ MftH264DecodeEngine* decoder_;
+ scoped_refptr<VideoFrame> current_frame_;
+};
+
+class FFmpegFileReaderWrapper : public BaseMftReader {
+ public:
+ FFmpegFileReaderWrapper() {}
+ virtual ~FFmpegFileReaderWrapper() {}
+ bool InitReader(const std::string& filename) {
+ reader_.reset(new FFmpegFileReader(filename));
+ if (!reader_.get() || !reader_->Initialize()) {
+ reader_.reset();
+ return false;
+ }
+ return true;
+ }
+ virtual void ReadCallback(scoped_refptr<DataBuffer>* input) {
+ if (reader_.get()) {
+ reader_->Read(input);
+ }
+ }
+ bool GetWidth(int* width) {
+ if (!reader_.get())
+ return false;
+ return reader_->GetWidth(width);
+ }
+ bool GetHeight(int* height) {
+ if (!reader_.get())
+ return false;
+ return reader_->GetHeight(height);
+ }
+ scoped_ptr<FFmpegFileReader> reader_;
+};
+
+// Helper functions
+
+static FilePath GetVideoFilePath(const std::string& file_name) {
+ FilePath path;
+ PathService::Get(base::DIR_SOURCE_ROOT, &path);
+ path = path.AppendASCII("media")
+ .AppendASCII("test")
+ .AppendASCII("data")
+ .AppendASCII(file_name.c_str());
+ return path;
+}
+
+class MftH264DecodeEngineTest : public testing::Test {
+ protected:
+ MftH264DecodeEngineTest()
+ : loop_(),
+ window_(NULL),
+ handler_(NULL),
+ engine_(NULL),
+ context_(NULL) {
+ }
+ virtual ~MftH264DecodeEngineTest() {}
+ virtual void SetUp() {
+ handler_.reset(new SimpleMftH264DecodeEngineHandler());
+ }
+ virtual void TearDown() {
+ if (context_.get()) {
+ context_->ReleaseAllVideoFrames();
+ context_->Destroy(NULL);
+ }
+ if (window_)
+ DestroyWindow(window_);
+ }
+ void GetDecodeEngine(bool dxva) {
+ if (dxva) {
+ if (!window_)
+ CreateDrawWindow();
+ context_.reset(new MftH264DecodeEngineContext(window_));
+ ASSERT_TRUE(context_.get());
+ context_->Initialize(NULL);
+ ASSERT_TRUE(context_->initialized());
+ }
+ engine_.reset(new MftH264DecodeEngine(dxva));
+ ASSERT_TRUE(engine_.get());
+ }
+ void InitDecodeEngine(int width, int height) {
+ VideoCodecConfig config;
+ config.width = width;
+ config.height = height;
+
+    // Note that although |config| is passed by reference, it is copied into
+    // the decode engine, so it is okay to make |config| a local variable.
+ engine_->Initialize(&loop_, handler_.get(), context_.get(), config);
+ EXPECT_EQ(1, handler_->init_count_);
+ EXPECT_EQ(MftH264DecodeEngine::kNormal, engine_->state());
+ }
+ void InitDecodeEngine() {
+ InitDecodeEngine(800, 600);
+ }
+ void TestInitAndUninit(bool dxva) {
+ GetDecodeEngine(dxva);
+ InitDecodeEngine();
+ engine_->Uninitialize();
+ }
+ void DecodeAll(scoped_refptr<BaseMftReader> reader) {
+ handler_->SetReader(reader);
+ handler_->SetDecodeEngine(engine_.get());
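+    // Each ProduceVideoFrame() call makes the engine request more input via
+    // the handler, which feeds it from |reader|; loop until the engine
+    // reaches kStopped at end of stream.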
+ while (MftH264DecodeEngine::kStopped != engine_->state()) {
+ scoped_refptr<VideoFrame> frame;
+ engine_->ProduceVideoFrame(frame);
+ }
+ }
+ void DecodeValidVideo(const std::string& filename, int num_frames,
+ bool dxva) {
+ scoped_refptr<FFmpegFileReaderWrapper> reader(
+ new FFmpegFileReaderWrapper());
+ ASSERT_TRUE(reader.get());
+ FilePath path = GetVideoFilePath(filename);
+ ASSERT_TRUE(file_util::PathExists(path));
+ ASSERT_TRUE(reader->InitReader(WideToASCII(path.value())));
+ int actual_width;
+ int actual_height;
+ ASSERT_TRUE(reader->GetWidth(&actual_width));
+ ASSERT_TRUE(reader->GetHeight(&actual_height));
+
+ VideoCodecConfig config;
+ CreateDrawWindow(config.width, config.height);
+ GetDecodeEngine(dxva);
+ InitDecodeEngine();
+ DecodeAll(reader);
+
+    // We expect a format change when the decoder receives enough data to
+    // determine the actual frame width/height.
+ EXPECT_GT(handler_->format_change_count_, 0);
+ EXPECT_EQ(actual_width, handler_->info_.stream_info.surface_width);
+ EXPECT_EQ(actual_height, handler_->info_.stream_info.surface_height);
+ EXPECT_GE(handler_->empty_buffer_callback_count_, num_frames);
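+    // fill_buffer_callback_count_ includes the final empty end-of-stream
+    // frame, hence the -1.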
+ EXPECT_EQ(num_frames, handler_->fill_buffer_callback_count_ - 1);
+ engine_->Uninitialize();
+ }
+ void ExpectDefaultDimensionsOnInput(int width, int height) {
+ GetDecodeEngine(false);
+ InitDecodeEngine(width, height);
+ EXPECT_EQ(kDecoderMaxWidth, handler_->info_.stream_info.surface_width);
+ EXPECT_EQ(kDecoderMaxHeight, handler_->info_.stream_info.surface_height);
+ engine_->Uninitialize();
+ }
+
+ scoped_ptr<SimpleMftH264DecodeEngineHandler> handler_;
+ scoped_ptr<MftH264DecodeEngine> engine_;
+ scoped_ptr<MftH264DecodeEngineContext> context_;
+
+ private:
+ void CreateDrawWindow(int width, int height) {
+ static const wchar_t kClassName[] = L"Test";
+ static const wchar_t kWindowTitle[] = L"MFT Unittest Draw Window";
+ WNDCLASS window_class = {0};
+ window_class.lpszClassName = kClassName;
+ window_class.hInstance = NULL;
+ window_class.hbrBackground = 0;
+ window_class.lpfnWndProc = DefWindowProc;
+ window_class.hCursor = 0;
+ RegisterClass(&window_class);
+ window_ = CreateWindow(kClassName,
+ kWindowTitle,
+ (WS_OVERLAPPEDWINDOW | WS_VISIBLE) &
+ ~(WS_MAXIMIZEBOX | WS_THICKFRAME),
+ 100, 100, width, height,
+ NULL, NULL, NULL, NULL);
+ ASSERT_TRUE(window_);
+ }
+ void CreateDrawWindow() {
+ CreateDrawWindow(800, 600);
+ }
+
+ MessageLoop loop_;
+ HWND window_;
+};
+
+// A simple test case for init/deinit of MF/COM libraries.
+TEST_F(MftH264DecodeEngineTest, LibraryInit) {
+ EXPECT_TRUE(MftH264DecodeEngine::StartupComLibraries());
+ MftH264DecodeEngine::ShutdownComLibraries();
+}
+
+TEST_F(MftH264DecodeEngineTest, DecoderUninitializedAtFirst) {
+ GetDecodeEngine(true);
+ EXPECT_EQ(MftH264DecodeEngine::kUninitialized, engine_->state());
+}
+
+TEST_F(MftH264DecodeEngineTest, DecoderInitMissingArgs) {
+ VideoCodecConfig config;
+ GetDecodeEngine(false);
+ engine_->Initialize(NULL, NULL, NULL, config);
+ EXPECT_EQ(MftH264DecodeEngine::kUninitialized, engine_->state());
+}
+
+TEST_F(MftH264DecodeEngineTest, DecoderInitNoDxva) {
+ TestInitAndUninit(false);
+}
+
+TEST_F(MftH264DecodeEngineTest, DecoderInitDxva) {
+ TestInitAndUninit(true);
+}
+
+TEST_F(MftH264DecodeEngineTest, DecoderUninit) {
+ TestInitAndUninit(false);
+ EXPECT_EQ(1, handler_->uninit_count_);
+ EXPECT_EQ(MftH264DecodeEngine::kUninitialized, engine_->state());
+}
+
+TEST_F(MftH264DecodeEngineTest, UninitBeforeInit) {
+ GetDecodeEngine(false);
+ engine_->Uninitialize();
+ EXPECT_EQ(0, handler_->uninit_count_);
+}
+
+TEST_F(MftH264DecodeEngineTest, InitWithNegativeDimensions) {
+ ExpectDefaultDimensionsOnInput(-123, -456);
+}
+
+TEST_F(MftH264DecodeEngineTest, InitWithTooHighDimensions) {
+ ExpectDefaultDimensionsOnInput(kDecoderMaxWidth + 1, kDecoderMaxHeight + 1);
+}
+
+TEST_F(MftH264DecodeEngineTest, DrainOnEmptyBuffer) {
+ GetDecodeEngine(false);
+ InitDecodeEngine();
+
+  // The decoder should switch to drain mode because of this end-of-stream
+  // buffer, and then switch to kStopped when it reports needing more input
+  // while draining.
+ scoped_refptr<Buffer> buffer(new DataBuffer(0));
+ engine_->ConsumeVideoSample(buffer);
+ EXPECT_EQ(MftH264DecodeEngine::kStopped, engine_->state());
+
+ // Should have called back with one empty frame.
+ EXPECT_EQ(1, handler_->fill_buffer_callback_count_);
+ ASSERT_TRUE(handler_->current_frame_.get());
+ EXPECT_EQ(VideoFrame::EMPTY, handler_->current_frame_->format());
+ engine_->Uninitialize();
+}
+
+TEST_F(MftH264DecodeEngineTest, NoOutputOnGarbageInput) {
+ // 100 samples of garbage.
+ const int kNumFrames = 100;
+ scoped_refptr<FakeMftReader> reader(new FakeMftReader(kNumFrames));
+ ASSERT_TRUE(reader.get());
+
+ GetDecodeEngine(false);
+ InitDecodeEngine();
+ DecodeAll(reader);
+
+  // The output callback should be invoked only once: the empty frame that
+  // indicates end of stream.
+ EXPECT_EQ(1, handler_->fill_buffer_callback_count_);
+ ASSERT_TRUE(handler_->current_frame_.get());
+ EXPECT_EQ(VideoFrame::EMPTY, handler_->current_frame_->format());
+
+  // One extra count because of the end-of-stream buffer.
+ EXPECT_EQ(kNumFrames, handler_->empty_buffer_callback_count_ - 1);
+ engine_->Uninitialize();
+}
+
+TEST_F(MftH264DecodeEngineTest, FlushAtStart) {
+ GetDecodeEngine(false);
+ InitDecodeEngine();
+ engine_->Flush();
+
+ // Flush should succeed even if input/output are empty.
+ EXPECT_EQ(1, handler_->flush_count_);
+ engine_->Uninitialize();
+}
+
+TEST_F(MftH264DecodeEngineTest, NoFlushAtStopped) {
+ scoped_refptr<BaseMftReader> reader(new FakeMftReader());
+ ASSERT_TRUE(reader.get());
+
+ GetDecodeEngine(false);
+ InitDecodeEngine();
+ DecodeAll(reader);
+
+ EXPECT_EQ(0, handler_->flush_count_);
+ int old_flush_count = handler_->flush_count_;
+ engine_->Flush();
+ EXPECT_EQ(old_flush_count, handler_->flush_count_);
+ engine_->Uninitialize();
+}
+
+TEST_F(MftH264DecodeEngineTest, DecodeValidVideoDxva) {
+ DecodeValidVideo("bear.1280x720.mp4", 82, true);
+}
+
+TEST_F(MftH264DecodeEngineTest, DecodeValidVideoNoDxva) {
+ DecodeValidVideo("bear.1280x720.mp4", 82, false);
+}
+
+} // namespace media