author    scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-06-03 02:50:27 +0000
committer scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-06-03 02:50:27 +0000
commit    25ea4f05da1024bc0c4ee33b655cd562be71853c (patch)
tree      b2fe68b51efa93c95ec7e1d476ac7e17ba0cc062 /media
parent    401ea4406626191d8994d64f13e1164859d8bb4c (diff)
Removing defunct Media Foundation based video decode engines and devices.
These have been unused for quite some time and are getting in the way of
refactoring/improving video decoding code.

BUG=none
TEST=the world still compiles

Review URL: http://codereview.chromium.org/7065050

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@87753 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r--  media/media.gyp                                 |    6
-rw-r--r--  media/video/mft_h264_decode_engine.cc           |  696
-rw-r--r--  media/video/mft_h264_decode_engine.h            |  106
-rw-r--r--  media/video/mft_h264_decode_engine_unittest.cc  |  411
4 files changed, 0 insertions, 1219 deletions
diff --git a/media/media.gyp b/media/media.gyp
index d90b43e..b7560c1 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -201,12 +201,6 @@
],
},
'conditions': [
- ['OS=="win"', {
- 'sources': [
- 'video/mft_h264_decode_engine.cc',
- 'video/mft_h264_decode_engine.h',
- ],
- }],
['OS == "linux" or OS == "freebsd" or OS == "solaris"', {
'link_settings': {
'libraries': [
diff --git a/media/video/mft_h264_decode_engine.cc b/media/video/mft_h264_decode_engine.cc
deleted file mode 100644
index e084738..0000000
--- a/media/video/mft_h264_decode_engine.cc
+++ /dev/null
@@ -1,696 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/video/mft_h264_decode_engine.h"
-
-#include <d3d9.h>
-#include <dxva2api.h>
-#include <initguid.h>
-#include <mfapi.h>
-// Placed after mfapi.h to avoid linking strmiids.lib for MR_BUFFER_SERVICE.
-#include <evr.h>
-#include <mferror.h>
-#include <wmcodecdsp.h>
-
-#include "base/time.h"
-#include "base/message_loop.h"
-#include "media/base/limits.h"
-#include "media/base/pipeline.h"
-#include "media/video/video_decode_context.h"
-
-#pragma comment(lib, "dxva2.lib")
-#pragma comment(lib, "d3d9.lib")
-#pragma comment(lib, "mf.lib")
-#pragma comment(lib, "mfplat.lib")
-
-using base::TimeDelta;
-
-namespace media {
-
-// Creates an empty Media Foundation sample with no buffers.
-static IMFSample* CreateEmptySample() {
- HRESULT hr;
- base::win::ScopedComPtr<IMFSample> sample;
- hr = MFCreateSample(sample.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Unable to create an empty sample";
- return NULL;
- }
- return sample.Detach();
-}
-
-// Creates a Media Foundation sample with one buffer of length |buffer_length|
-// on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0.
-// If |align| is 0, then no alignment is specified.
-static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
- CHECK_GT(buffer_length, 0);
- base::win::ScopedComPtr<IMFSample> sample;
- sample.Attach(CreateEmptySample());
- if (!sample.get())
- return NULL;
- base::win::ScopedComPtr<IMFMediaBuffer> buffer;
- HRESULT hr;
- if (align == 0) {
- // Note that MFCreateMemoryBuffer is same as MFCreateAlignedMemoryBuffer
- // with the align argument being 0.
- hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
- } else {
- hr = MFCreateAlignedMemoryBuffer(buffer_length,
- align - 1,
- buffer.Receive());
- }
- if (FAILED(hr)) {
- LOG(ERROR) << "Unable to create an empty buffer";
- return NULL;
- }
- hr = sample->AddBuffer(buffer.get());
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to add empty buffer to sample";
- return NULL;
- }
- return sample.Detach();
-}
-
-// Creates a Media Foundation sample with one buffer containing a copy of the
-// given Annex B stream data.
-// If duration and sample time are not known, provide 0.
-// |min_size| specifies the minimum size of the buffer (might be required by
-// the decoder for input). The times here should be given in 100ns units.
-// |alignment| specifies the buffer in the sample to be aligned. If no
-// alignment is required, provide 0 or 1.
-static IMFSample* CreateInputSample(const uint8* stream, int size,
- int64 timestamp, int64 duration,
- int min_size, int alignment) {
- CHECK(stream);
- CHECK_GT(size, 0);
- base::win::ScopedComPtr<IMFSample> sample;
- sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
- alignment));
- if (!sample.get()) {
- LOG(ERROR) << "Failed to create empty buffer for input";
- return NULL;
- }
- HRESULT hr;
- if (duration > 0) {
- hr = sample->SetSampleDuration(duration);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set sample duration";
- return NULL;
- }
- }
- if (timestamp > 0) {
- hr = sample->SetSampleTime(timestamp);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set sample time";
- return NULL;
- }
- }
- base::win::ScopedComPtr<IMFMediaBuffer> buffer;
- hr = sample->GetBufferByIndex(0, buffer.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get buffer in sample";
- return NULL;
- }
- DWORD max_length, current_length;
- uint8* destination;
- hr = buffer->Lock(&destination, &max_length, &current_length);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to lock buffer";
- return NULL;
- }
- CHECK_EQ(current_length, 0u);
- CHECK_GE(static_cast<int>(max_length), size);
- memcpy(destination, stream, size);
- CHECK(SUCCEEDED(buffer->Unlock()));
- hr = buffer->SetCurrentLength(size);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set current length to " << size;
- return NULL;
- }
- VLOG(1) << __FUNCTION__ << " wrote " << size << " bytes into input sample";
- return sample.Detach();
-}
-
-const GUID ConvertVideoFrameFormatToGuid(VideoFrame::Format format) {
- switch (format) {
- case VideoFrame::NV12:
- return MFVideoFormat_NV12;
- case VideoFrame::YV12:
- return MFVideoFormat_YV12;
- default:
- NOTREACHED() << "Unsupported VideoFrame format";
- return GUID_NULL;
- }
- NOTREACHED();
- return GUID_NULL;
-}
-
-// public methods
-
-MftH264DecodeEngine::MftH264DecodeEngine(bool use_dxva)
- : use_dxva_(use_dxva),
- state_(kUninitialized),
- width_(0),
- height_(0),
- event_handler_(NULL),
- context_(NULL) {
- memset(&input_stream_info_, 0, sizeof(input_stream_info_));
- memset(&output_stream_info_, 0, sizeof(output_stream_info_));
- memset(&info_, 0, sizeof(info_));
-}
-
-MftH264DecodeEngine::~MftH264DecodeEngine() {
-}
-
-void MftH264DecodeEngine::Initialize(
- MessageLoop* message_loop,
- VideoDecodeEngine::EventHandler* event_handler,
- VideoDecodeContext* context,
- const VideoDecoderConfig& config) {
- DCHECK(!use_dxva_ || context);
- if (state_ != kUninitialized) {
- LOG(ERROR) << "Initialize: invalid state";
- return;
- }
- if (!message_loop || !event_handler) {
- LOG(ERROR) << "MftH264DecodeEngine::Initialize: parameters cannot be NULL";
- return;
- }
- context_ = context;
- event_handler_ = event_handler;
- info_.provides_buffers = true;
-
- if (use_dxva_) {
- info_.stream_info.surface_format = VideoFrame::NV12;
- // TODO(hclam): Need to correct this since this is not really GL texture.
- // We should just remove surface_type from stream_info.
- info_.stream_info.surface_type = VideoFrame::TYPE_GL_TEXTURE;
- } else {
- info_.stream_info.surface_format = VideoFrame::YV12;
- info_.stream_info.surface_type = VideoFrame::TYPE_SYSTEM_MEMORY;
- }
-
- // codec_info.stream_info_.surface_width_/height_ are initialized
- // in InitInternal().
- info_.success = InitInternal();
- if (info_.success) {
- state_ = kNormal;
- AllocFramesFromContext();
- } else {
- LOG(ERROR) << "MftH264DecodeEngine::Initialize failed";
- event_handler_->OnInitializeComplete(info_);
- }
-}
-
-void MftH264DecodeEngine::Uninitialize() {
- if (state_ == kUninitialized) {
- LOG(ERROR) << "Uninitialize: invalid state";
- return;
- }
-
- // TODO(hclam): Call ShutdownComLibraries only after MFT is released.
- decode_engine_.Release();
- ShutdownComLibraries();
- state_ = kUninitialized;
- event_handler_->OnUninitializeComplete();
-}
-
-void MftH264DecodeEngine::Flush() {
- if (state_ != kNormal) {
- LOG(ERROR) << "Flush: invalid state";
- return;
- }
- state_ = kFlushing;
- if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH)) {
- LOG(WARNING) << "MftH264DecodeEngine::Flush failed to send message";
- }
- state_ = kNormal;
- event_handler_->OnFlushComplete();
-}
-
-void MftH264DecodeEngine::Seek() {
- if (state_ != kNormal) {
- LOG(ERROR) << "Seek: invalid state";
- return;
- }
-
- // TODO(hclam): Seriously the logic in VideoRendererBase is flawed that we
- // have to perform the following hack to get playback going.
- PipelineStatistics statistics;
- for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) {
- event_handler_->ConsumeVideoFrame(output_frames_[0], statistics);
- }
-
- // Seek not implemented.
- event_handler_->OnSeekComplete();
-}
-
-void MftH264DecodeEngine::ConsumeVideoSample(scoped_refptr<Buffer> buffer) {
- if (state_ == kUninitialized) {
- LOG(ERROR) << "ConsumeVideoSample: invalid state";
- }
- base::win::ScopedComPtr<IMFSample> sample;
- PipelineStatistics statistics;
- if (!buffer->IsEndOfStream()) {
- sample.Attach(
- CreateInputSample(buffer->GetData(),
- buffer->GetDataSize(),
- buffer->GetTimestamp().InMicroseconds() * 10,
- buffer->GetDuration().InMicroseconds() * 10,
- input_stream_info_.cbSize,
- input_stream_info_.cbAlignment));
- if (!sample.get()) {
- LOG(ERROR) << "Failed to create an input sample";
- } else {
- if (FAILED(decode_engine_->ProcessInput(0, sample.get(), 0))) {
- event_handler_->OnError();
- }
- }
-
- statistics.video_bytes_decoded = buffer->GetDataSize();
- } else {
- if (state_ != MftH264DecodeEngine::kEosDrain) {
- // End of stream, send drain messages.
- if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM) ||
- !SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN)) {
- LOG(ERROR) << "Failed to send EOS / drain messages to MFT";
- event_handler_->OnError();
- } else {
- state_ = MftH264DecodeEngine::kEosDrain;
- }
- }
- }
- DoDecode(statistics);
-}
-
-void MftH264DecodeEngine::ProduceVideoFrame(scoped_refptr<VideoFrame> frame) {
- if (state_ == kUninitialized) {
- LOG(ERROR) << "ProduceVideoFrame: invalid state";
- return;
- }
- event_handler_->ProduceVideoSample(NULL);
-}
-
-// private methods
-
-// static
-bool MftH264DecodeEngine::StartupComLibraries() {
- HRESULT hr;
- hr = CoInitializeEx(NULL,
- COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
- if (FAILED(hr)) {
- LOG(ERROR) << "CoInit fail";
- return false;
- }
-
- hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
- if (FAILED(hr)) {
- LOG(ERROR) << "MFStartup fail";
- CoUninitialize();
- return false;
- }
- return true;
-}
-
-// static
-void MftH264DecodeEngine::ShutdownComLibraries() {
- HRESULT hr;
- hr = MFShutdown();
- if (FAILED(hr)) {
- LOG(WARNING) << "Warning: MF failed to shutdown";
- }
- CoUninitialize();
-}
-
-bool MftH264DecodeEngine::EnableDxva() {
- IDirect3DDevice9* device = static_cast<IDirect3DDevice9*>(
- context_->GetDevice());
- base::win::ScopedComPtr<IDirect3DDeviceManager9> device_manager;
- UINT dev_manager_reset_token = 0;
- HRESULT hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
- device_manager.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Couldn't create D3D Device manager";
- return false;
- }
-
- hr = device_manager->ResetDevice(device, dev_manager_reset_token);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to reset device";
- return false;
- }
-
- hr = decode_engine_->ProcessMessage(
- MFT_MESSAGE_SET_D3D_MANAGER,
- reinterpret_cast<ULONG_PTR>(device_manager.get()));
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set D3D9 device manager to decoder "
- << std::hex << hr;
- return false;
- }
-
- return true;
-}
-
-bool MftH264DecodeEngine::InitInternal() {
- if (!StartupComLibraries())
- return false;
- if (!InitDecodeEngine())
- return false;
- if (!GetStreamsInfoAndBufferReqs())
- return false;
- return SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING);
-}
-
-bool MftH264DecodeEngine::InitDecodeEngine() {
- // TODO(jiesun): use MFEnum to get decoder CLSID.
- HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMFTransform),
- reinterpret_cast<void**>(
- decode_engine_.Receive()));
- if (FAILED(hr) || !decode_engine_.get()) {
- LOG(ERROR) << "CoCreateInstance failed " << std::hex << std::showbase << hr;
- return false;
- }
- if (!CheckDecodeEngineDxvaSupport())
- return false;
- if (use_dxva_ && !EnableDxva())
- return false;
- return SetDecodeEngineMediaTypes();
-}
-
-void MftH264DecodeEngine::AllocFramesFromContext() {
- if (!use_dxva_)
- return;
-
- // TODO(imcheng): Pass in an actual task. (From EventHandler?)
- context_->ReleaseAllVideoFrames();
- output_frames_.clear();
- context_->AllocateVideoFrames(
- 1, info_.stream_info.surface_width, info_.stream_info.surface_height,
- VideoFrame::RGBA, &output_frames_,
- NewRunnableMethod(this, &MftH264DecodeEngine::OnAllocFramesDone));
-}
-
-void MftH264DecodeEngine::OnAllocFramesDone() {
- event_handler_->OnInitializeComplete(info_);
-}
-
-bool MftH264DecodeEngine::CheckDecodeEngineDxvaSupport() {
- base::win::ScopedComPtr<IMFAttributes> attributes;
- HRESULT hr = decode_engine_->GetAttributes(attributes.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Unlock: Failed to get attributes, hr = "
- << std::hex << std::showbase << hr;
- return false;
- }
-
- UINT32 dxva;
- hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
- if (FAILED(hr) || !dxva) {
- LOG(ERROR) << "Failed to get DXVA attr or decoder is not DXVA-aware, hr = "
- << std::hex << std::showbase << hr
- << " this might not be the right decoder.";
- return false;
- }
- return true;
-}
-
-bool MftH264DecodeEngine::SetDecodeEngineMediaTypes() {
- if (!SetDecodeEngineInputMediaType())
- return false;
- return SetDecodeEngineOutputMediaType(
- ConvertVideoFrameFormatToGuid(info_.stream_info.surface_format));
-}
-
-bool MftH264DecodeEngine::SetDecodeEngineInputMediaType() {
- base::win::ScopedComPtr<IMFMediaType> media_type;
- HRESULT hr = MFCreateMediaType(media_type.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to create empty media type object";
- return false;
- }
-
- hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
- if (FAILED(hr)) {
- LOG(ERROR) << "SetGUID for major type failed";
- return false;
- }
-
- hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
- if (FAILED(hr)) {
- LOG(ERROR) << "SetGUID for subtype failed";
- return false;
- }
-
- hr = decode_engine_->SetInputType(0, media_type.get(), 0); // No flags
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set decoder's input type";
- return false;
- }
-
- return true;
-}
-
-bool MftH264DecodeEngine::SetDecodeEngineOutputMediaType(const GUID subtype) {
- DWORD i = 0;
- IMFMediaType* out_media_type;
- bool found = false;
- while (SUCCEEDED(decode_engine_->GetOutputAvailableType(0, i,
- &out_media_type))) {
- GUID out_subtype;
- HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to GetGUID() on GetOutputAvailableType() " << i;
- out_media_type->Release();
- continue;
- }
- if (out_subtype == subtype) {
- hr = decode_engine_->SetOutputType(0, out_media_type, 0); // No flags
- hr = MFGetAttributeSize(out_media_type, MF_MT_FRAME_SIZE,
- reinterpret_cast<UINT32*>(&info_.stream_info.surface_width),
- reinterpret_cast<UINT32*>(&info_.stream_info.surface_height));
- width_ = info_.stream_info.surface_width;
- height_ = info_.stream_info.surface_height;
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to SetOutputType to |subtype| or obtain "
- << "width/height " << std::hex << hr;
- } else {
- out_media_type->Release();
- return true;
- }
- }
- i++;
- out_media_type->Release();
- }
- return false;
-}
-
-bool MftH264DecodeEngine::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
- HRESULT hr = decode_engine_->ProcessMessage(msg, NULL);
- return SUCCEEDED(hr);
-}
-
-// Prints out info about the input/output streams, gets the minimum buffer sizes
-// for input and output samples.
-// The MFT will not allocate buffer for neither input nor output, so we have
-// to do it ourselves and make sure they're the correct size.
-// Exception is when dxva is enabled, the decoder will allocate output.
-bool MftH264DecodeEngine::GetStreamsInfoAndBufferReqs() {
- HRESULT hr = decode_engine_->GetInputStreamInfo(0, &input_stream_info_);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get input stream info";
- return false;
- }
- VLOG(1) << "Input stream info:"
- << "\nMax latency: " << input_stream_info_.hnsMaxLatency
- << "\nFlags: " << std::hex << std::showbase
- << input_stream_info_.dwFlags
- << "\nMin buffer size: " << input_stream_info_.cbSize
- << "\nMax lookahead: " << input_stream_info_.cbMaxLookahead
- << "\nAlignment: " << input_stream_info_.cbAlignment;
- // There should be three flags, one for requiring a whole frame be in a
- // single sample, one for requiring there be one buffer only in a single
- // sample, and one that specifies a fixed sample size. (as in cbSize)
- CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
-
- hr = decode_engine_->GetOutputStreamInfo(0, &output_stream_info_);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get output stream info";
- return false;
- }
- VLOG(1) << "Output stream info:"
- << "\nFlags: " << std::hex << std::showbase
- << output_stream_info_.dwFlags
- << "\nMin buffer size: " << output_stream_info_.cbSize
- << "\nAlignment: " << output_stream_info_.cbAlignment;
- // The flags here should be the same and mean the same thing, except when
- // DXVA is enabled, there is an extra 0x100 flag meaning decoder will
- // allocate its own sample.
- CHECK_EQ(output_stream_info_.dwFlags, use_dxva_ ? 0x107u : 0x7u);
-
- return true;
-}
-
-bool MftH264DecodeEngine::DoDecode(const PipelineStatistics& statistics) {
- if (state_ != kNormal && state_ != kEosDrain) {
- LOG(ERROR) << "DoDecode: not in normal or drain state";
- return false;
- }
- scoped_refptr<VideoFrame> frame;
- base::win::ScopedComPtr<IMFSample> output_sample;
- if (!use_dxva_) {
- output_sample.Attach(
- CreateEmptySampleWithBuffer(output_stream_info_.cbSize,
- output_stream_info_.cbAlignment));
- if (!output_sample.get()) {
- LOG(ERROR) << "GetSample: failed to create empty output sample";
- event_handler_->OnError();
- return false;
- }
- }
- MFT_OUTPUT_DATA_BUFFER output_data_buffer;
- memset(&output_data_buffer, 0, sizeof(output_data_buffer));
- output_data_buffer.dwStreamID = 0;
- output_data_buffer.pSample = output_sample;
-
- DWORD status;
- HRESULT hr = decode_engine_->ProcessOutput(0, // No flags
- 1, // # of out streams to pull
- &output_data_buffer,
- &status);
-
- IMFCollection* events = output_data_buffer.pEvents;
- if (events != NULL) {
- VLOG(1) << "Got events from ProcessOuput, but discarding";
- events->Release();
- }
-
- if (FAILED(hr)) {
- if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
- hr = SetDecodeEngineOutputMediaType(
- ConvertVideoFrameFormatToGuid(info_.stream_info.surface_format));
- if (SUCCEEDED(hr)) {
- // TODO(hclam): Need to fix this case. This happens when we have a
- // format change. We have to resume decoding only after we have
- // allocated a new set of video frames.
- // AllocFramesFromContext();
- // event_handler_->OnFormatChange(info_.stream_info);
- event_handler_->ProduceVideoSample(NULL);
- return true;
- }
- event_handler_->OnError();
- return false;
- }
- if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
- if (state_ == kEosDrain) {
- // No more output from the decoder. Notify EOS and stop playback.
- scoped_refptr<VideoFrame> frame;
- VideoFrame::CreateEmptyFrame(&frame);
- event_handler_->ConsumeVideoFrame(frame, statistics);
- state_ = MftH264DecodeEngine::kStopped;
- return false;
- }
- event_handler_->ProduceVideoSample(NULL);
- return true;
- }
- LOG(ERROR) << "Unhandled error in DoDecode()";
- state_ = MftH264DecodeEngine::kStopped;
- event_handler_->OnError();
- return false;
- }
-
- // We succeeded in getting an output sample.
- if (use_dxva_) {
- // For DXVA we didn't provide the sample, i.e. output_sample was NULL.
- output_sample.Attach(output_data_buffer.pSample);
- }
- if (!output_sample.get()) {
- LOG(ERROR) << "ProcessOutput succeeded, but did not get a sample back";
- event_handler_->OnError();
- return true;
- }
-
- int64 timestamp = 0, duration = 0;
- if (FAILED(output_sample->GetSampleTime(&timestamp)) ||
- FAILED(output_sample->GetSampleDuration(&duration))) {
- LOG(WARNING) << "Failed to get timestamp/duration from output";
- }
-
- // The duration and timestamps are in 100-ns units, so divide by 10
- // to convert to microseconds.
- timestamp /= 10;
- duration /= 10;
-
- // Sanity checks for checking if there is really something in the sample.
- DWORD buf_count;
- hr = output_sample->GetBufferCount(&buf_count);
- if (FAILED(hr) || buf_count != 1) {
- LOG(ERROR) << "Failed to get buffer count, or buffer count mismatch";
- return true;
- }
-
- base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
- hr = output_sample->GetBufferByIndex(0, output_buffer.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get buffer from sample";
- return true;
- }
- if (use_dxva_) {
- base::win::ScopedComPtr<IDirect3DSurface9, &IID_IDirect3DSurface9> surface;
- hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
- IID_PPV_ARGS(surface.Receive()));
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get surface from buffer";
- return true;
- }
- // Since we only allocated 1 frame from context.
- // TODO(imcheng): Detect error.
- output_frames_[0]->SetTimestamp(TimeDelta::FromMicroseconds(timestamp));
- output_frames_[0]->SetDuration(TimeDelta::FromMicroseconds(duration));
- context_->ConvertToVideoFrame(
- surface.get(), output_frames_[0],
- NewRunnableMethod(this, &MftH264DecodeEngine::OnUploadVideoFrameDone,
- surface, output_frames_[0], statistics));
- return true;
- }
- // TODO(hclam): Remove this branch.
- // Not DXVA.
- VideoFrame::CreateFrame(info_.stream_info.surface_format,
- info_.stream_info.surface_width,
- info_.stream_info.surface_height,
- TimeDelta::FromMicroseconds(timestamp),
- TimeDelta::FromMicroseconds(duration),
- &frame);
- if (!frame.get()) {
- LOG(ERROR) << "Failed to allocate video frame for yuv plane";
- event_handler_->OnError();
- return true;
- }
- uint8* src_y;
- DWORD max_length, current_length;
- hr = output_buffer->Lock(&src_y, &max_length, &current_length);
- if (FAILED(hr))
- return true;
- uint8* dst_y = static_cast<uint8*>(frame->data(VideoFrame::kYPlane));
-
- memcpy(dst_y, src_y, current_length);
- CHECK(SUCCEEDED(output_buffer->Unlock()));
- event_handler_->ConsumeVideoFrame(frame, statistics);
- return true;
-}
-
-void MftH264DecodeEngine::OnUploadVideoFrameDone(
- base::win::ScopedComPtr<IDirect3DSurface9, &IID_IDirect3DSurface9> surface,
- scoped_refptr<VideoFrame> frame,
- PipelineStatistics statistics) {
- // After this method is exited the reference to surface is released.
- event_handler_->ConsumeVideoFrame(frame, statistics);
-}
-
-} // namespace media
-
-DISABLE_RUNNABLE_METHOD_REFCOUNT(media::MftH264DecodeEngine);
diff --git a/media/video/mft_h264_decode_engine.h b/media/video/mft_h264_decode_engine.h
deleted file mode 100644
index a6ff52c..0000000
--- a/media/video/mft_h264_decode_engine.h
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MFT H.264 decode engine.
-
-#ifndef MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_H_
-#define MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_H_
-
-// TODO(imcheng): Get rid of this header by:
-// - forward declaring IMFTransform and its IID as in
-// mft_h264_decode_engine_context.h
-// - turning the general SendMFTMessage method into specific methods
-// (SendFlushMessage, SendDrainMessage, etc.) to avoid having
-// MFT_MESSAGE_TYPE in here
-#include <mfidl.h>
-#include <vector>
-
-#include "base/gtest_prod_util.h"
-#include "base/win/scoped_comptr.h"
-#include "media/video/video_decode_engine.h"
-
-struct IDirect3DSurface9;
-extern "C" const GUID IID_IDirect3DSurface9;
-
-class MessageLoop;
-
-namespace media {
-
-class VideoDecodeContext;
-
-class MftH264DecodeEngine : public media::VideoDecodeEngine {
- public:
- typedef enum {
- kUninitialized, // un-initialized.
- kNormal, // normal playing state.
- kFlushing, // upon received Flush(), before FlushDone()
- kEosDrain, // upon input EOS received.
- kStopped, // upon output EOS received.
- } State;
-
- explicit MftH264DecodeEngine(bool use_dxva);
- virtual ~MftH264DecodeEngine();
-
- // VideoDecodeEngine implementation.
- virtual void Initialize(MessageLoop* message_loop,
- media::VideoDecodeEngine::EventHandler* event_handler,
- VideoDecodeContext* context,
- const VideoDecoderConfig& config);
- virtual void Uninitialize();
- virtual void Flush();
- virtual void Seek();
- virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
- virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
-
- bool use_dxva() const { return use_dxva_; }
- State state() const { return state_; }
-
- private:
- friend class MftH264DecodeEngineTest;
- FRIEND_TEST_ALL_PREFIXES(MftH264DecodeEngineTest, LibraryInit);
-
- // TODO(jiesun): Find a way to move all these to GpuVideoService..
- static bool StartupComLibraries();
- static void ShutdownComLibraries();
- bool EnableDxva();
-
- bool InitInternal();
- bool InitDecodeEngine();
- void AllocFramesFromContext();
- bool CheckDecodeEngineDxvaSupport();
- bool SetDecodeEngineMediaTypes();
- bool SetDecodeEngineInputMediaType();
- bool SetDecodeEngineOutputMediaType(const GUID subtype);
- bool SendMFTMessage(MFT_MESSAGE_TYPE msg);
- bool GetStreamsInfoAndBufferReqs();
- bool DoDecode(const PipelineStatistics& statistics);
- void OnAllocFramesDone();
- void OnUploadVideoFrameDone(
- base::win::ScopedComPtr<IDirect3DSurface9,
- &IID_IDirect3DSurface9> surface,
- scoped_refptr<media::VideoFrame> frame, PipelineStatistics statistics);
-
- bool use_dxva_;
- base::win::ScopedComPtr<IMFTransform> decode_engine_;
-
- MFT_INPUT_STREAM_INFO input_stream_info_;
- MFT_OUTPUT_STREAM_INFO output_stream_info_;
-
- State state_;
-
- int width_;
- int height_;
-
- VideoDecodeEngine::EventHandler* event_handler_;
- VideoCodecInfo info_;
-
- VideoDecodeContext* context_;
- std::vector<scoped_refptr<VideoFrame> > output_frames_;
-
- DISALLOW_COPY_AND_ASSIGN(MftH264DecodeEngine);
-};
-
-} // namespace media
-
-#endif // MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_H_
diff --git a/media/video/mft_h264_decode_engine_unittest.cc b/media/video/mft_h264_decode_engine_unittest.cc
deleted file mode 100644
index 60d2a77..0000000
--- a/media/video/mft_h264_decode_engine_unittest.cc
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/file_path.h"
-#include "base/file_util.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop.h"
-#include "base/path_service.h"
-#include "base/string_util.h"
-#include "base/time.h"
-#include "media/base/data_buffer.h"
-#include "media/base/video_frame.h"
-#include "media/tools/mft_h264_example/file_reader_util.h"
-#include "media/video/mft_h264_decode_engine.h"
-#include "media/video/mft_h264_decode_engine_context.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::TimeDelta;
-
-namespace media {
-
-static const int kDecoderMaxWidth = 1920;
-static const int kDecoderMaxHeight = 1088;
-
-// Helper classes
-
-class BaseMftReader : public base::RefCountedThreadSafe<BaseMftReader> {
- public:
- virtual ~BaseMftReader() {}
- virtual void ReadCallback(scoped_refptr<DataBuffer>* input) = 0;
-};
-
-class FakeMftReader : public BaseMftReader {
- public:
- FakeMftReader() : frames_remaining_(20) {}
- explicit FakeMftReader(int count) : frames_remaining_(count) {}
- virtual ~FakeMftReader() {}
-
- // Provides garbage input to the decoder.
- virtual void ReadCallback(scoped_refptr<DataBuffer>* input) {
- if (frames_remaining_ > 0) {
- int sz = 4096;
- uint8* buf = new uint8[sz];
- memset(buf, 42, sz);
- *input = new DataBuffer(buf, sz);
- (*input)->SetDuration(base::TimeDelta::FromMicroseconds(5000));
- (*input)->SetTimestamp(
- base::TimeDelta::FromMicroseconds(
- 50000000 - frames_remaining_ * 10000));
- --frames_remaining_;
- } else {
- // Emulate end of stream on the last "frame".
- *input = new DataBuffer(0);
- }
- }
- int frames_remaining() const { return frames_remaining_; }
-
- private:
- int frames_remaining_;
-};
-
-class SimpleMftH264DecodeEngineHandler
- : public VideoDecodeEngine::EventHandler {
- public:
- SimpleMftH264DecodeEngineHandler()
- : init_count_(0),
- uninit_count_(0),
- flush_count_(0),
- format_change_count_(0),
- empty_buffer_callback_count_(0),
- fill_buffer_callback_count_(0) {
- memset(&info_, 0, sizeof(info_));
- }
- virtual ~SimpleMftH264DecodeEngineHandler() {}
- virtual void OnInitializeComplete(const VideoCodecInfo& info) {
- info_ = info;
- init_count_++;
- }
- virtual void OnUninitializeComplete() {
- uninit_count_++;
- }
- virtual void OnFlushComplete() {
- flush_count_++;
- }
- virtual void OnSeekComplete() {}
- virtual void OnError() {}
- virtual void OnFormatChange(VideoStreamInfo stream_info) {
- format_change_count_++;
- info_.stream_info = stream_info;
- }
- virtual void ProduceVideoSample(scoped_refptr<Buffer> buffer) {
- if (reader_.get() && decoder_) {
- empty_buffer_callback_count_++;
- scoped_refptr<DataBuffer> input;
- reader_->ReadCallback(&input);
- decoder_->ConsumeVideoSample(input);
- }
- }
- virtual void ConsumeVideoFrame(scoped_refptr<VideoFrame> frame,
- const PipelineStatistics& statistics) {
- fill_buffer_callback_count_++;
- current_frame_ = frame;
- }
- void SetReader(scoped_refptr<BaseMftReader> reader) {
- reader_ = reader;
- }
- void SetDecodeEngine(MftH264DecodeEngine* decoder) {
- decoder_ = decoder;
- }
-
- int init_count_;
- int uninit_count_;
- int flush_count_;
- int format_change_count_;
- int empty_buffer_callback_count_;
- int fill_buffer_callback_count_;
- VideoCodecInfo info_;
- scoped_refptr<BaseMftReader> reader_;
- MftH264DecodeEngine* decoder_;
- scoped_refptr<VideoFrame> current_frame_;
-};
-
-class FFmpegFileReaderWrapper : public BaseMftReader {
- public:
- FFmpegFileReaderWrapper() {}
- virtual ~FFmpegFileReaderWrapper() {}
- bool InitReader(const std::string& filename) {
- reader_.reset(new FFmpegFileReader(filename));
- if (!reader_.get() || !reader_->Initialize()) {
- reader_.reset();
- return false;
- }
- return true;
- }
- virtual void ReadCallback(scoped_refptr<DataBuffer>* input) {
- if (reader_.get()) {
- reader_->Read(input);
- }
- }
- bool GetWidth(int* width) {
- if (!reader_.get())
- return false;
- return reader_->GetWidth(width);
- }
- bool GetHeight(int* height) {
- if (!reader_.get())
- return false;
- return reader_->GetHeight(height);
- }
- scoped_ptr<FFmpegFileReader> reader_;
-};
-
-// Helper functions
-
-static FilePath GetVideoFilePath(const std::string& file_name) {
- FilePath path;
- PathService::Get(base::DIR_SOURCE_ROOT, &path);
- path = path.AppendASCII("media")
- .AppendASCII("test")
- .AppendASCII("data")
- .AppendASCII(file_name.c_str());
- return path;
-}
-
-class MftH264DecodeEngineTest : public testing::Test {
- protected:
- MftH264DecodeEngineTest()
- : loop_(),
- window_(NULL),
- handler_(NULL),
- engine_(NULL),
- context_(NULL) {
- }
- virtual ~MftH264DecodeEngineTest() {}
- virtual void SetUp() {
- handler_.reset(new SimpleMftH264DecodeEngineHandler());
- }
- virtual void TearDown() {
- if (context_.get()) {
- context_->ReleaseAllVideoFrames();
- context_->Destroy(NULL);
- }
- if (window_)
- DestroyWindow(window_);
- }
- void GetDecodeEngine(bool dxva) {
- if (dxva) {
- if (!window_)
- CreateDrawWindow();
- context_.reset(new MftH264DecodeEngineContext(window_));
- ASSERT_TRUE(context_.get());
- context_->Initialize(NULL);
- ASSERT_TRUE(context_->initialized());
- }
- engine_.reset(new MftH264DecodeEngine(dxva));
- ASSERT_TRUE(engine_.get());
- }
- void InitDecodeEngine(int width, int height) {
- VideoDecoderConfig config;
- config.width = width;
- config.height = height;
-
- // Note that although |config| is passed as reference, |config| is copied
- // into the decode engine, so it is okay to make |config| a local variable.
- engine_->Initialize(&loop_, handler_.get(), context_.get(), config);
- EXPECT_EQ(1, handler_->init_count_);
- EXPECT_EQ(MftH264DecodeEngine::kNormal, engine_->state());
- }
- void InitDecodeEngine() {
- InitDecodeEngine(800, 600);
- }
- void TestInitAndUninit(bool dxva) {
- GetDecodeEngine(dxva);
- InitDecodeEngine();
- engine_->Uninitialize();
- }
- void DecodeAll(scoped_refptr<BaseMftReader> reader) {
- handler_->SetReader(reader);
- handler_->SetDecodeEngine(engine_.get());
- while (MftH264DecodeEngine::kStopped != engine_->state()) {
- scoped_refptr<VideoFrame> frame;
- engine_->ProduceVideoFrame(frame);
- }
- }
- void DecodeValidVideo(const std::string& filename, int num_frames,
- bool dxva) {
- scoped_refptr<FFmpegFileReaderWrapper> reader(
- new FFmpegFileReaderWrapper());
- ASSERT_TRUE(reader.get());
- FilePath path = GetVideoFilePath(filename);
- ASSERT_TRUE(file_util::PathExists(path));
- ASSERT_TRUE(reader->InitReader(WideToASCII(path.value())));
- int actual_width;
- int actual_height;
- ASSERT_TRUE(reader->GetWidth(&actual_width));
- ASSERT_TRUE(reader->GetHeight(&actual_height));
-
- VideoDecoderConfig config;
- CreateDrawWindow(config.width, config.height);
- GetDecodeEngine(dxva);
- InitDecodeEngine();
- DecodeAll(reader);
-
- // We expect a format change when decoder receives enough data to determine
- // the actual frame width/height.
- EXPECT_GT(handler_->format_change_count_, 0);
- EXPECT_EQ(actual_width, handler_->info_.stream_info.surface_width);
- EXPECT_EQ(actual_height, handler_->info_.stream_info.surface_height);
- EXPECT_GE(handler_->empty_buffer_callback_count_, num_frames);
- EXPECT_EQ(num_frames, handler_->fill_buffer_callback_count_ - 1);
- engine_->Uninitialize();
- }
- void ExpectDefaultDimensionsOnInput(int width, int height) {
- GetDecodeEngine(false);
- InitDecodeEngine(width, height);
- EXPECT_EQ(kDecoderMaxWidth, handler_->info_.stream_info.surface_width);
- EXPECT_EQ(kDecoderMaxHeight, handler_->info_.stream_info.surface_height);
- engine_->Uninitialize();
- }
-
- scoped_ptr<SimpleMftH264DecodeEngineHandler> handler_;
- scoped_ptr<MftH264DecodeEngine> engine_;
- scoped_ptr<MftH264DecodeEngineContext> context_;
-
- private:
- void CreateDrawWindow(int width, int height) {
- static const wchar_t kClassName[] = L"Test";
- static const wchar_t kWindowTitle[] = L"MFT Unittest Draw Window";
- WNDCLASS window_class = {0};
- window_class.lpszClassName = kClassName;
- window_class.hInstance = NULL;
- window_class.hbrBackground = 0;
- window_class.lpfnWndProc = DefWindowProc;
- window_class.hCursor = 0;
- RegisterClass(&window_class);
- window_ = CreateWindow(kClassName,
- kWindowTitle,
- (WS_OVERLAPPEDWINDOW | WS_VISIBLE) &
- ~(WS_MAXIMIZEBOX | WS_THICKFRAME),
- 100, 100, width, height,
- NULL, NULL, NULL, NULL);
- ASSERT_TRUE(window_);
- }
- void CreateDrawWindow() {
- CreateDrawWindow(800, 600);
- }
-
- MessageLoop loop_;
- HWND window_;
-};
-
-// A simple test case for init/deinit of MF/COM libraries.
-TEST_F(MftH264DecodeEngineTest, LibraryInit) {
- EXPECT_TRUE(MftH264DecodeEngine::StartupComLibraries());
- MftH264DecodeEngine::ShutdownComLibraries();
-}
-
-TEST_F(MftH264DecodeEngineTest, DecoderUninitializedAtFirst) {
- GetDecodeEngine(true);
- EXPECT_EQ(MftH264DecodeEngine::kUninitialized, engine_->state());
-}
-
-TEST_F(MftH264DecodeEngineTest, DecoderInitMissingArgs) {
- VideoDecoderConfig config;
- GetDecodeEngine(false);
- engine_->Initialize(NULL, NULL, NULL, config);
- EXPECT_EQ(MftH264DecodeEngine::kUninitialized, engine_->state());
-}
-
-TEST_F(MftH264DecodeEngineTest, DecoderInitNoDxva) {
- TestInitAndUninit(false);
-}
-
-TEST_F(MftH264DecodeEngineTest, DecoderInitDxva) {
- TestInitAndUninit(true);
-}
-
-TEST_F(MftH264DecodeEngineTest, DecoderUninit) {
- TestInitAndUninit(false);
- EXPECT_EQ(1, handler_->uninit_count_);
- EXPECT_EQ(MftH264DecodeEngine::kUninitialized, engine_->state());
-}
-
-TEST_F(MftH264DecodeEngineTest, UninitBeforeInit) {
- GetDecodeEngine(false);
- engine_->Uninitialize();
- EXPECT_EQ(0, handler_->uninit_count_);
-}
-
-TEST_F(MftH264DecodeEngineTest, InitWithNegativeDimensions) {
- ExpectDefaultDimensionsOnInput(-123, -456);
-}
-
-TEST_F(MftH264DecodeEngineTest, InitWithTooHighDimensions) {
- ExpectDefaultDimensionsOnInput(kDecoderMaxWidth + 1, kDecoderMaxHeight + 1);
-}
-
-TEST_F(MftH264DecodeEngineTest, DrainOnEmptyBuffer) {
- GetDecodeEngine(false);
- InitDecodeEngine();
-
- // Decoder should switch to drain mode because of this NULL buffer, and then
- // switch to kStopped when it says it needs more input during drain mode.
- scoped_refptr<Buffer> buffer(new DataBuffer(0));
- engine_->ConsumeVideoSample(buffer);
- EXPECT_EQ(MftH264DecodeEngine::kStopped, engine_->state());
-
- // Should have called back with one empty frame.
- EXPECT_EQ(1, handler_->fill_buffer_callback_count_);
- ASSERT_TRUE(handler_->current_frame_.get());
- EXPECT_EQ(VideoFrame::EMPTY, handler_->current_frame_->format());
- engine_->Uninitialize();
-}
-
-TEST_F(MftH264DecodeEngineTest, NoOutputOnGarbageInput) {
- // 100 samples of garbage.
- const int kNumFrames = 100;
- scoped_refptr<FakeMftReader> reader(new FakeMftReader(kNumFrames));
- ASSERT_TRUE(reader.get());
-
- GetDecodeEngine(false);
- InitDecodeEngine();
- DecodeAll(reader);
-
- // Output callback should only be invoked once - the empty frame to indicate
- // end of stream.
- EXPECT_EQ(1, handler_->fill_buffer_callback_count_);
- ASSERT_TRUE(handler_->current_frame_.get());
- EXPECT_EQ(VideoFrame::EMPTY, handler_->current_frame_->format());
-
- // One extra count because of the end of stream NULL sample.
- EXPECT_EQ(kNumFrames, handler_->empty_buffer_callback_count_ - 1);
- engine_->Uninitialize();
-}
-
-TEST_F(MftH264DecodeEngineTest, FlushAtStart) {
- GetDecodeEngine(false);
- InitDecodeEngine();
- engine_->Flush();
-
- // Flush should succeed even if input/output are empty.
- EXPECT_EQ(1, handler_->flush_count_);
- engine_->Uninitialize();
-}
-
-TEST_F(MftH264DecodeEngineTest, NoFlushAtStopped) {
- scoped_refptr<BaseMftReader> reader(new FakeMftReader());
- ASSERT_TRUE(reader.get());
-
- GetDecodeEngine(false);
- InitDecodeEngine();
- DecodeAll(reader);
-
- EXPECT_EQ(0, handler_->flush_count_);
- int old_flush_count = handler_->flush_count_;
- engine_->Flush();
- EXPECT_EQ(old_flush_count, handler_->flush_count_);
- engine_->Uninitialize();
-}
-
-TEST_F(MftH264DecodeEngineTest, DecodeValidVideoDxva) {
- DecodeValidVideo("bear.1280x720.mp4", 82, true);
-}
-
-TEST_F(MftH264DecodeEngineTest, DecodeValidVideoNoDxva) {
- DecodeValidVideo("bear.1280x720.mp4", 82, false);
-}
-
-} // namespace media