author    ananta@chromium.org <ananta@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-12-22 03:44:18 +0000
committer ananta@chromium.org <ananta@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-12-22 03:44:18 +0000
commit    01eb8ab2badce7c4746dcb34f97654a28d79b870 (patch)
tree      77dc69f9a2094544dc88aa9022d2dd415686cbc5 /content/common
parent    24838d910daf2635cf22dd005014d55099ca7dd5 (diff)
Initial implementation of the DXVA 2.0 H.264 hardware decoder for pepper for Windows.

The decoding is done using the Microsoft Media Foundation API. To render the output bitmap on the GPU texture we create a temporary Direct3D surface in the RGB format and copy the decoded contents to this surface. This will change once we have an ANGLE extension which allows us to pass the decoded surface as-is for rendering.

We do the following prior to initializing the GPU sandbox:
1. Load the necessary decoding dlls.
2. Create static instances of the IDirect3DDeviceManager9 and the IDirect3DDevice9Ex interfaces. These are shared among all decoder instances.
This work is done in the PreSandboxInitialization function of the DXVAVideoDecodeAccelerator class.

We cannot use CoCreateInstance to instantiate the H.264 decoder as that fails in the sandbox. Instead we do the donkey work of loading the dll and using DllGetClassObject to instantiate the decoder.

BUG=none
TEST=Refactored the omx_video_decode_accelerator_unittest.cc test to ensure it works on both Windows and Chrome OS. The file has been renamed to video_decode_accelerator_unittest.cc accordingly.

Review URL: http://codereview.chromium.org/8510039

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115482 0039d316-1c4b-4281-b951-d872f2087c98
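For reference, the DllGetClassObject workaround described above reduces to roughly the following standalone sketch (the helper name CreateH264Decoder and the simplified error handling are for illustration only; the authoritative version is InitDecoder in the new dxva_video_decode_accelerator.cc below):

    #include <windows.h>
    #include <mftransform.h>  // IMFTransform
    #include <wmcodecdsp.h>   // CMSH264DecoderMFT

    // Mimic the steps CoCreateInstance performs internally, since calling
    // CoCreateInstance itself fails inside the sandbox.
    IMFTransform* CreateH264Decoder() {
      // The dll is loaded before the sandbox is initialized; here we only
      // look up the already-loaded module.
      HMODULE dll = ::GetModuleHandle(L"msmpeg2vdec.dll");
      if (!dll)
        return NULL;
      typedef HRESULT (WINAPI* GetClassObject)(const CLSID& clsid,
                                               const IID& iid,
                                               void** object);
      GetClassObject get_class_object = reinterpret_cast<GetClassObject>(
          ::GetProcAddress(dll, "DllGetClassObject"));
      if (!get_class_object)
        return NULL;
      // Ask the dll for the decoder's class factory, then use the factory
      // to create the actual IMFTransform instance.
      IClassFactory* factory = NULL;
      HRESULT hr = get_class_object(__uuidof(CMSH264DecoderMFT),
                                    __uuidof(IClassFactory),
                                    reinterpret_cast<void**>(&factory));
      if (FAILED(hr))
        return NULL;
      IMFTransform* decoder = NULL;
      hr = factory->CreateInstance(NULL, __uuidof(IMFTransform),
                                   reinterpret_cast<void**>(&decoder));
      factory->Release();
      return SUCCEEDED(hr) ? decoder : NULL;
    }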
Diffstat (limited to 'content/common')
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.cc                   3
-rw-r--r--  content/common/gpu/media/dxva_video_decode_accelerator.cc     849
-rw-r--r--  content/common/gpu/media/dxva_video_decode_accelerator.h      221
-rw-r--r--  content/common/gpu/media/gpu_video_decode_accelerator.cc       37
-rw-r--r--  content/common/gpu/media/gpu_video_decode_accelerator.h         5
-rw-r--r--  content/common/gpu/media/video_decode_accelerator_unittest.cc (renamed from content/common/gpu/media/omx_video_decode_accelerator_unittest.cc)  261
6 files changed, 1282 insertions, 94 deletions
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index 8c3ae57..b1d340c 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -461,7 +461,8 @@ void GpuCommandBufferStub::OnCreateVideoDecoder(
new GpuVideoDecodeAccelerator(this, decoder_route_id, this);
video_decoders_.AddWithID(decoder, decoder_route_id);
channel_->AddRoute(decoder_route_id, decoder);
- decoder->Initialize(profile, reply_message);
+ decoder->Initialize(profile, reply_message,
+ channel_->renderer_process());
}
void GpuCommandBufferStub::OnDestroyVideoDecoder(int decoder_route_id) {
diff --git a/content/common/gpu/media/dxva_video_decode_accelerator.cc b/content/common/gpu/media/dxva_video_decode_accelerator.cc
new file mode 100644
index 0000000..02c4b5f
--- /dev/null
+++ b/content/common/gpu/media/dxva_video_decode_accelerator.cc
@@ -0,0 +1,849 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/media/dxva_video_decode_accelerator.h"
+
+#if !defined(OS_WIN)
+#error This file should only be built on Windows.
+#endif // !defined(OS_WIN)
+
+#include <ks.h>
+#include <codecapi.h>
+#include <d3dx9tex.h>
+#include <mfapi.h>
+#include <mferror.h>
+#include <wmcodecdsp.h>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/memory/scoped_handle.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop.h"
+#include "base/process_util.h"
+#include "base/shared_memory.h"
+#include "media/video/video_decode_accelerator.h"
+#include "third_party/angle/include/GLES2/gl2.h"
+#include "third_party/angle/include/GLES2/gl2ext.h"
+
+// We only request 5 picture buffers from the client, which are used to hold
+// the decoded samples. These buffers are then reused when the client tells us
+// that it is done with the buffer.
+static const int kNumPictureBuffers = 5;
+
+bool DXVAVideoDecodeAccelerator::pre_sandbox_init_done_ = false;
+uint32 DXVAVideoDecodeAccelerator::dev_manager_reset_token_ = 0;
+IDirect3DDeviceManager9* DXVAVideoDecodeAccelerator::device_manager_ = NULL;
+IDirect3DDevice9Ex* DXVAVideoDecodeAccelerator::device_ = NULL;
+
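+// Error handling helpers. |ret| is what the enclosing function returns on
+// failure; functions returning void pass an empty |ret| argument, which is
+// why some call sites below end with ",)".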
+#define RETURN_ON_FAILURE(result, log, ret) \
+ do { \
+ if (!(result)) { \
+ DLOG(ERROR) << log; \
+ return ret; \
+ } \
+ } while (0)
+
+#define RETURN_ON_HR_FAILURE(result, log, ret) \
+ RETURN_ON_FAILURE(SUCCEEDED(result), \
+ log << ", HRESULT: 0x" << std::hex << result, \
+ ret);
+
+#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
+ do { \
+ if (!(result)) { \
+ DVLOG(1) << log; \
+ StopOnError(error_code); \
+ return ret; \
+ } \
+ } while (0)
+
+#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret) \
+ RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result), \
+ log << ", HRESULT: 0x" << std::hex << result, \
+ error_code, ret);
+
+static IMFSample* CreateEmptySample() {
+ base::win::ScopedComPtr<IMFSample> sample;
+ HRESULT hr = MFCreateSample(sample.Receive());
+ RETURN_ON_HR_FAILURE(hr, "MFCreateSample failed", NULL);
+ return sample.Detach();
+}
+
+// Creates a Media Foundation sample with one buffer of length |buffer_length|
+// on a |align|-byte boundary. |align| must be a power of 2, or 0 for no
+// alignment.
+static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
+ CHECK_GT(buffer_length, 0);
+
+ base::win::ScopedComPtr<IMFSample> sample;
+ sample.Attach(CreateEmptySample());
+
+ base::win::ScopedComPtr<IMFMediaBuffer> buffer;
+ HRESULT hr = E_FAIL;
+ if (align == 0) {
+    // Note that MFCreateMemoryBuffer is equivalent to
+    // MFCreateAlignedMemoryBuffer with an alignment argument of 0.
+ hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
+ } else {
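+    // MFCreateAlignedMemoryBuffer takes the alignment minus 1, matching the
+    // MF_x_BYTE_ALIGNMENT constants (e.g. MF_16_BYTE_ALIGNMENT == 15).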
+ hr = MFCreateAlignedMemoryBuffer(buffer_length,
+ align - 1,
+ buffer.Receive());
+ }
+ RETURN_ON_HR_FAILURE(hr, "Failed to create memory buffer for sample", NULL);
+
+ hr = sample->AddBuffer(buffer);
+ RETURN_ON_HR_FAILURE(hr, "Failed to add buffer to sample", NULL);
+
+ return sample.Detach();
+}
+
+// Creates a Media Foundation sample with one buffer containing a copy of the
+// given Annex B stream data.
+// |min_size| specifies the minimum size of the buffer (might be required by
+// the decoder for input). If no alignment is required, provide 0 for
+// |alignment|.
+static IMFSample* CreateInputSample(const uint8* stream, int size,
+ int min_size, int alignment) {
+ CHECK(stream);
+ CHECK_GT(size, 0);
+ base::win::ScopedComPtr<IMFSample> sample;
+ sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
+ alignment));
+ RETURN_ON_FAILURE(sample, "Failed to create empty sample", NULL);
+
+ base::win::ScopedComPtr<IMFMediaBuffer> buffer;
+ HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
+ RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", NULL);
+
+ DWORD max_length = 0;
+ DWORD current_length = 0;
+ uint8* destination = NULL;
+ hr = buffer->Lock(&destination, &max_length, &current_length);
+ RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL);
+
+ CHECK_EQ(current_length, 0u);
+ CHECK_GE(static_cast<int>(max_length), size);
+ memcpy(destination, stream, size);
+
+ hr = buffer->Unlock();
+ RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL);
+
+ hr = buffer->SetCurrentLength(size);
+ RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL);
+
+ return sample.Detach();
+}
+
+static IMFSample* CreateSampleFromInputBuffer(
+ const media::BitstreamBuffer& bitstream_buffer,
+ base::ProcessHandle renderer_process,
+ DWORD stream_size,
+ DWORD alignment) {
+ HANDLE shared_memory_handle = NULL;
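+  // |bitstream_buffer.handle()| is only valid in the renderer process, so it
+  // has to be duplicated into this (GPU) process before it can be mapped.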
+ RETURN_ON_FAILURE(::DuplicateHandle(renderer_process,
+ bitstream_buffer.handle(),
+ base::GetCurrentProcessHandle(),
+ &shared_memory_handle,
+ 0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS),
+ "Duplicate handle failed", NULL);
+
+ base::SharedMemory shm(shared_memory_handle, true);
+ RETURN_ON_FAILURE(shm.Map(bitstream_buffer.size()),
+ "Failed in base::SharedMemory::Map", NULL);
+
+ return CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()),
+ bitstream_buffer.size(),
+ stream_size,
+ alignment);
+}
+
+DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
+ const media::PictureBuffer& buffer)
+ : available(true),
+ picture_buffer(buffer) {
+}
+
+DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
+ int32 buffer_id, IDirect3DSurface9* surface)
+ : input_buffer_id(buffer_id),
+ dest_surface(surface) {
+}
+
+DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}
+
+// static
+void DXVAVideoDecodeAccelerator::PreSandboxInitialization() {
+ // Should be called only once during program startup.
+ DCHECK(!pre_sandbox_init_done_);
+
+ static wchar_t* decoding_dlls[] = {
+ L"d3d9.dll",
+ L"d3dx9_43.dll",
+ L"dxva2.dll",
+ L"mf.dll",
+ L"mfplat.dll",
+ L"msmpeg2vdec.dll",
+ };
+
+  for (size_t i = 0; i < arraysize(decoding_dlls); ++i) {
+ if (!::LoadLibrary(decoding_dlls[i])) {
+ DLOG(ERROR) << "Failed to load decoder dll: " << decoding_dlls[i]
+ << ", Error: " << ::GetLastError();
+ return;
+ }
+ }
+
+ RETURN_ON_FAILURE(CreateD3DDevManager(),
+ "Failed to initialize D3D device and manager",);
+ pre_sandbox_init_done_ = true;
+}
+
+// static
+bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
+ base::win::ScopedComPtr<IDirect3D9Ex> d3d9;
+
+ HRESULT hr = Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9.Receive());
+ RETURN_ON_HR_FAILURE(hr, "Direct3DCreate9Ex failed", false);
+
+ D3DPRESENT_PARAMETERS present_params = {0};
+ present_params.BackBufferWidth = 1;
+ present_params.BackBufferHeight = 1;
+ present_params.BackBufferFormat = D3DFMT_UNKNOWN;
+ present_params.BackBufferCount = 1;
+ present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
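+  // The device never presents; the 1x1 back buffer and the shell window
+  // merely satisfy CreateDeviceEx's requirements for a windowed device.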
+ present_params.hDeviceWindow = ::GetShellWindow();
+ present_params.Windowed = TRUE;
+ present_params.Flags = D3DPRESENTFLAG_VIDEO;
+ present_params.FullScreen_RefreshRateInHz = 0;
+ present_params.PresentationInterval = 0;
+
+ hr = d3d9->CreateDeviceEx(D3DADAPTER_DEFAULT,
+ D3DDEVTYPE_HAL,
+ ::GetShellWindow(),
+ D3DCREATE_SOFTWARE_VERTEXPROCESSING,
+ &present_params,
+ NULL,
+ &device_);
+ RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device", false);
+
+ hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_,
+ &device_manager_);
+ RETURN_ON_HR_FAILURE(hr, "DXVA2CreateDirect3DDeviceManager9 failed", false);
+
+ hr = device_manager_->ResetDevice(device_, dev_manager_reset_token_);
+ RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);
+ return true;
+}
+
+DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
+ media::VideoDecodeAccelerator::Client* client,
+ base::ProcessHandle renderer_process)
+ : client_(client),
+ state_(kUninitialized),
+ pictures_requested_(false),
+ renderer_process_(renderer_process),
+ last_input_buffer_id_(-1),
+ inputs_before_decode_(0) {
+}
+
+DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
+ client_ = NULL;
+}
+
+bool DXVAVideoDecodeAccelerator::Initialize(Profile) {
+ DCHECK(CalledOnValidThread());
+
+ RETURN_AND_NOTIFY_ON_FAILURE(pre_sandbox_init_done_,
+ "PreSandbox initialization not completed", PLATFORM_FAILURE, false);
+
+ RETURN_AND_NOTIFY_ON_FAILURE((state_ == kUninitialized),
+ "Initialize: invalid state: " << state_, ILLEGAL_STATE, false);
+
+ HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
+ RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "MFStartup failed.", PLATFORM_FAILURE,
+ false);
+
+ RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(),
+ "Failed to initialize decoder", PLATFORM_FAILURE, false);
+
+ RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
+ "Failed to get input/output stream info.", PLATFORM_FAILURE, false);
+
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
+ "Failed to start decoder", PLATFORM_FAILURE, false);
+
+ state_ = kNormal;
+ MessageLoop::current()->PostTask(FROM_HERE,
+ base::Bind(&DXVAVideoDecodeAccelerator::NotifyInitializeDone, this));
+ return true;
+}
+
+void DXVAVideoDecodeAccelerator::Decode(
+ const media::BitstreamBuffer& bitstream_buffer) {
+ DCHECK(CalledOnValidThread());
+
+ RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
+ "Invalid state: " << state_, ILLEGAL_STATE,);
+
+ base::win::ScopedComPtr<IMFSample> sample;
+ sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer,
+ renderer_process_,
+ input_stream_info_.cbSize,
+ input_stream_info_.cbAlignment));
+ RETURN_AND_NOTIFY_ON_FAILURE(sample, "Failed to create input sample",
+ PLATFORM_FAILURE,);
+ if (!inputs_before_decode_) {
+ TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");
+ }
+ inputs_before_decode_++;
+
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0),
+ "Failed to create input sample", PLATFORM_FAILURE,);
+
+ HRESULT hr = decoder_->ProcessInput(0, sample, 0);
+ RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample",
+ PLATFORM_FAILURE,);
+
+ RETURN_AND_NOTIFY_ON_FAILURE(
+ SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0),
+ "Failed to send eos message to MFT", PLATFORM_FAILURE,);
+ state_ = kEosDrain;
+
+ last_input_buffer_id_ = bitstream_buffer.id();
+
+ DoDecode();
+
+ RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal),
+ "Failed to process output. Unexpected decoder state: " << state_,
+ ILLEGAL_STATE,);
+
+  // The Microsoft Media Foundation decoder internally buffers up to 30 frames
+  // before returning a decoded frame. We need to inform the client that this
+ // input buffer is processed as it may stop sending us further input.
+ // Note: This may break clients which expect every input buffer to be
+ // associated with a decoded output buffer.
+ // TODO(ananta)
+ // Do some more investigation into whether it is possible to get the MFT
+ // decoder to emit an output packet for every input packet.
+ // http://code.google.com/p/chromium/issues/detail?id=108121
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+ &DXVAVideoDecodeAccelerator::NotifyInputBufferRead, this,
+ bitstream_buffer.id()));
+}
+
+void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
+ const std::vector<media::PictureBuffer>& buffers) {
+ DCHECK(CalledOnValidThread());
+ // Copy the picture buffers provided by the client to the available list,
+ // and mark these buffers as available for use.
+ for (size_t buffer_index = 0; buffer_index < buffers.size();
+ ++buffer_index) {
+ bool inserted = output_picture_buffers_.insert(std::make_pair(
+ buffers[buffer_index].id(),
+ DXVAPictureBuffer(buffers[buffer_index]))).second;
+ DCHECK(inserted);
+ }
+ ProcessPendingSamples();
+}
+
+void DXVAVideoDecodeAccelerator::ReusePictureBuffer(
+ int32 picture_buffer_id) {
+ DCHECK(CalledOnValidThread());
+
+ OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
+ RETURN_AND_NOTIFY_ON_FAILURE(it != output_picture_buffers_.end(),
+ "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
+
+ it->second.available = true;
+ ProcessPendingSamples();
+}
+
+void DXVAVideoDecodeAccelerator::Flush() {
+ DCHECK(CalledOnValidThread());
+
+ DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush";
+
+ RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
+ "Unexpected decoder state: " << state_, ILLEGAL_STATE,);
+
+ state_ = kEosDrain;
+
+ RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0),
+ "Failed to send drain message", PLATFORM_FAILURE,);
+
+  // As per the MSDN docs, after the client sends this message it calls
+  // IMFTransform::ProcessOutput in a loop, until ProcessOutput returns the
+  // error code MF_E_TRANSFORM_NEED_MORE_INPUT. The DoDecode function sets
+  // the state to kStopped when the decoder returns
+  // MF_E_TRANSFORM_NEED_MORE_INPUT.
+  // The MFT decoder can buffer up to 30 frames worth of input before
+  // returning an output frame. The loop here attempts to retrieve as many
+  // output frames as possible from the buffered set.
+ while (state_ != kStopped) {
+ DoDecode();
+ }
+
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+ &DXVAVideoDecodeAccelerator::NotifyFlushDone, this));
+
+ state_ = kNormal;
+}
+
+void DXVAVideoDecodeAccelerator::Reset() {
+ DCHECK(CalledOnValidThread());
+
+ DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset";
+
+ RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
+ "Reset: invalid state: " << state_, ILLEGAL_STATE,);
+
+ state_ = kResetting;
+
+ RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
+ "Reset: Failed to send message.", PLATFORM_FAILURE,);
+
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+ &DXVAVideoDecodeAccelerator::NotifyResetDone, this));
+
+ state_ = DXVAVideoDecodeAccelerator::kNormal;
+}
+
+void DXVAVideoDecodeAccelerator::Destroy() {
+ DCHECK(CalledOnValidThread());
+ Invalidate();
+}
+
+bool DXVAVideoDecodeAccelerator::InitDecoder() {
+ // We cannot use CoCreateInstance to instantiate the decoder object as that
+ // fails in the sandbox. We mimic the steps CoCreateInstance uses to
+ // instantiate the object.
+ HMODULE decoder_dll = ::GetModuleHandle(L"msmpeg2vdec.dll");
+ RETURN_ON_FAILURE(decoder_dll,
+ "msmpeg2vdec.dll required for decoding is not loaded",
+ false);
+
+ typedef HRESULT (WINAPI* GetClassObject)(const CLSID& clsid,
+ const IID& iid,
+ void** object);
+
+ GetClassObject get_class_object = reinterpret_cast<GetClassObject>(
+ GetProcAddress(decoder_dll, "DllGetClassObject"));
+ RETURN_ON_FAILURE(get_class_object,
+ "Failed to get DllGetClassObject pointer", false);
+
+ base::win::ScopedComPtr<IClassFactory> factory;
+ HRESULT hr = get_class_object(__uuidof(CMSH264DecoderMFT),
+ __uuidof(IClassFactory),
+ reinterpret_cast<void**>(factory.Receive()));
+ RETURN_ON_HR_FAILURE(hr, "DllGetClassObject for decoder failed", false);
+
+ hr = factory->CreateInstance(NULL, __uuidof(IMFTransform),
+ reinterpret_cast<void**>(decoder_.Receive()));
+ RETURN_ON_HR_FAILURE(hr, "Failed to create decoder instance", false);
+
+ RETURN_ON_FAILURE(CheckDecoderDxvaSupport(),
+ "Failed to check decoder DXVA support", false);
+
+ hr = decoder_->ProcessMessage(
+ MFT_MESSAGE_SET_D3D_MANAGER,
+ reinterpret_cast<ULONG_PTR>(device_manager_));
+ RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D manager to decoder", false);
+
+ return SetDecoderMediaTypes();
+}
+
+bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
+ base::win::ScopedComPtr<IMFAttributes> attributes;
+ HRESULT hr = decoder_->GetAttributes(attributes.Receive());
+ RETURN_ON_HR_FAILURE(hr, "Failed to get decoder attributes", false);
+
+ UINT32 dxva = 0;
+ hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
+ RETURN_ON_HR_FAILURE(hr, "Failed to check if decoder supports DXVA", false);
+
+ hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
+ RETURN_ON_HR_FAILURE(hr, "Failed to enable DXVA H/W decoding", false);
+ return true;
+}
+
+bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
+ RETURN_ON_FAILURE(SetDecoderInputMediaType(),
+ "Failed to set decoder input media type", false);
+ return SetDecoderOutputMediaType(MFVideoFormat_NV12);
+}
+
+bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
+ base::win::ScopedComPtr<IMFMediaType> media_type;
+ HRESULT hr = MFCreateMediaType(media_type.Receive());
+ RETURN_ON_HR_FAILURE(hr, "MFCreateMediaType failed", false);
+
+ hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+ RETURN_ON_HR_FAILURE(hr, "Failed to set major input type", false);
+
+ hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
+ RETURN_ON_HR_FAILURE(hr, "Failed to set subtype", false);
+
+ hr = decoder_->SetInputType(0, media_type, 0); // No flags
+ RETURN_ON_HR_FAILURE(hr, "Failed to set decoder input type", false);
+ return true;
+}
+
+bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
+ const GUID& subtype) {
+ base::win::ScopedComPtr<IMFMediaType> out_media_type;
+
+ for (uint32 i = 0;
+ SUCCEEDED(decoder_->GetOutputAvailableType(0, i,
+ out_media_type.Receive()));
+ ++i) {
+ GUID out_subtype = {0};
+ HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
+ RETURN_ON_HR_FAILURE(hr, "Failed to get output major type", false);
+
+ if (out_subtype == subtype) {
+ hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags
+ RETURN_ON_HR_FAILURE(hr, "Failed to set decoder output type", false);
+ return true;
+ }
+ out_media_type.Release();
+ }
+ return false;
+}
+
+bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg,
+ int32 param) {
+ HRESULT hr = decoder_->ProcessMessage(msg, param);
+ return SUCCEEDED(hr);
+}
+
+// Gets the minimum buffer sizes for input and output samples. The MFT will
+// not allocate buffers for input or output, so we have to do it ourselves and
+// make sure they're the correct size. We only provide decoding if DXVA is
+// enabled.
+bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
+ HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
+ RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false);
+
+ hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
+ RETURN_ON_HR_FAILURE(hr, "Failed to get decoder output stream info", false);
+
+ DVLOG(1) << "Input stream info: ";
+ DVLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency;
+  // There should be three flags, one for requiring that a whole frame be in
+  // a single sample, one for requiring that there be only one buffer in a
+  // single sample, and one that specifies a fixed sample size (cbSize).
+ CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
+
+ DVLOG(1) << "Min buffer size: " << input_stream_info_.cbSize;
+ DVLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
+ DVLOG(1) << "Alignment: " << input_stream_info_.cbAlignment;
+
+ DVLOG(1) << "Output stream info: ";
+  // The flags here should be the same and mean the same thing, except that
+  // when DXVA is enabled there is an extra 0x100 flag meaning the decoder
+  // will allocate its own samples.
+ DVLOG(1) << "Flags: "
+ << std::hex << std::showbase << output_stream_info_.dwFlags;
+ CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
+ DVLOG(1) << "Min buffer size: " << output_stream_info_.cbSize;
+ DVLOG(1) << "Alignment: " << output_stream_info_.cbAlignment;
+ return true;
+}
+
+void DXVAVideoDecodeAccelerator::DoDecode() {
+ // This function is also called from Flush in a loop which could result
+ // in the state transitioning to kNormal due to decoded output.
+ RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kEosDrain),
+ "DoDecode: not in normal/drain state", ILLEGAL_STATE,);
+
+ MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
+ DWORD status = 0;
+
+ HRESULT hr = decoder_->ProcessOutput(0, // No flags
+ 1, // # of out streams to pull from
+ &output_data_buffer,
+ &status);
+ IMFCollection* events = output_data_buffer.pEvents;
+ if (events != NULL) {
+ VLOG(1) << "Got events from ProcessOuput, but discarding";
+ events->Release();
+ }
+ if (FAILED(hr)) {
+ // A stream change needs further ProcessInput calls to get back decoder
+ // output which is why we need to set the state to stopped.
+ if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+ if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) {
+        // The decoder didn't let us set the NV12 output format. It is not
+        // clear why this can happen. Give up in disgust.
+ NOTREACHED() << "Failed to set decoder output media type to NV12";
+ state_ = kStopped;
+ } else {
+ DVLOG(1) << "Received output format change from the decoder."
+ " Recursively invoking DoDecode";
+ DoDecode();
+ }
+ return;
+ } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+ // No more output from the decoder. Stop playback.
+ state_ = kStopped;
+ return;
+ } else {
+ NOTREACHED() << "Unhandled error in DoDecode()";
+ return;
+ }
+ }
+ TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");
+
+ TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode",
+ inputs_before_decode_);
+
+ inputs_before_decode_ = 0;
+
+ RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample),
+ "Failed to process output sample.", PLATFORM_FAILURE,);
+
+ state_ = kNormal;
+}
+
+bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
+ RETURN_ON_FAILURE(sample, "Decode succeeded with NULL output sample", false);
+
+ base::win::ScopedComPtr<IMFSample> output_sample;
+ output_sample.Attach(sample);
+
+ base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
+ HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
+ RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from output sample", false);
+
+ base::win::ScopedComPtr<IDirect3DSurface9> surface;
+ hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
+ IID_PPV_ARGS(surface.Receive()));
+ RETURN_ON_HR_FAILURE(hr, "Failed to get D3D surface from output sample",
+ false);
+
+ D3DSURFACE_DESC surface_desc;
+ hr = surface->GetDesc(&surface_desc);
+ RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);
+
+ TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this,
+ "");
+ // TODO(ananta)
+ // The code below may not be necessary once we have an ANGLE extension which
+  // allows us to pass the Direct3D surface directly for rendering.
+
+  // The decoded bits in the source Direct3D surface are in the YUV
+  // format. ANGLE does not support that. As a workaround we create an
+  // offscreen surface in the RGB format and copy the source surface
+  // to this surface.
+ base::win::ScopedComPtr<IDirect3DSurface9> dest_surface;
+ hr = device_->CreateOffscreenPlainSurface(surface_desc.Width,
+ surface_desc.Height,
+ D3DFMT_A8R8G8B8,
+ D3DPOOL_DEFAULT,
+ dest_surface.Receive(),
+ NULL);
+ RETURN_ON_HR_FAILURE(hr, "Failed to create offscreen surface", false);
+
+ hr = D3DXLoadSurfaceFromSurface(dest_surface, NULL, NULL, surface, NULL,
+ NULL, D3DX_DEFAULT, 0);
+ RETURN_ON_HR_FAILURE(hr, "D3DXLoadSurfaceFromSurface failed", false);
+
+ TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, "");
+
+ pending_output_samples_.push_back(
+ PendingSampleInfo(last_input_buffer_id_, dest_surface));
+
+ // If we have available picture buffers to copy the output data then use the
+ // first one and then flag it as not being available for use.
+ if (output_picture_buffers_.size()) {
+ ProcessPendingSamples();
+ return true;
+ }
+ if (pictures_requested_) {
+ DVLOG(1) << "Waiting for picture slots from the client.";
+ return true;
+ }
+ // Go ahead and request picture buffers.
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+ &DXVAVideoDecodeAccelerator::RequestPictureBuffers,
+ this, surface_desc.Width, surface_desc.Height));
+
+ pictures_requested_ = true;
+ return true;
+}
+
+bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer(
+ IDirect3DSurface9* dest_surface, media::PictureBuffer picture_buffer,
+ int input_buffer_id) {
+ DCHECK(dest_surface);
+
+ D3DSURFACE_DESC surface_desc;
+ HRESULT hr = dest_surface->GetDesc(&surface_desc);
+ RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);
+
+ scoped_array<char> bits;
+ RETURN_ON_FAILURE(GetBitmapFromSurface(dest_surface, &bits),
+ "Failed to get bitmap from surface for rendering", false);
+
+  // This function currently executes in the context of IPC handlers in the
+  // GPU process, which ensures that there is always an OpenGL context.
+ GLint current_texture = 0;
+ glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);
+
+ glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_id());
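+  // The offscreen surface was created as D3DFMT_A8R8G8B8, which is laid out
+  // as BGRA in memory, hence GL_BGRA_EXT.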
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, surface_desc.Width,
+ surface_desc.Height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE,
+ reinterpret_cast<GLvoid*>(bits.get()));
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+ glBindTexture(GL_TEXTURE_2D, current_texture);
+
+ media::Picture output_picture(picture_buffer.id(), input_buffer_id);
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
+ &DXVAVideoDecodeAccelerator::NotifyPictureReady, this, output_picture));
+ return true;
+}
+
+void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
+ if (pending_output_samples_.empty())
+ return;
+
+ OutputBuffers::iterator index;
+
+ for (index = output_picture_buffers_.begin();
+ index != output_picture_buffers_.end() &&
+ !pending_output_samples_.empty();
+ ++index) {
+ if (index->second.available) {
+ PendingSampleInfo sample_info = pending_output_samples_.front();
+
+ CopyOutputSampleDataToPictureBuffer(sample_info.dest_surface,
+ index->second.picture_buffer,
+ sample_info.input_buffer_id);
+ index->second.available = false;
+ pending_output_samples_.pop_front();
+ }
+ }
+}
+
+void DXVAVideoDecodeAccelerator::ClearState() {
+ last_input_buffer_id_ = -1;
+ output_picture_buffers_.clear();
+ pending_output_samples_.clear();
+}
+
+void DXVAVideoDecodeAccelerator::StopOnError(
+ media::VideoDecodeAccelerator::Error error) {
+ DCHECK(CalledOnValidThread());
+
+ if (client_)
+ client_->NotifyError(error);
+ client_ = NULL;
+
+ if (state_ != kUninitialized) {
+ Invalidate();
+ }
+}
+
+bool DXVAVideoDecodeAccelerator::GetBitmapFromSurface(
+ IDirect3DSurface9* surface,
+ scoped_array<char>* bits) {
+ // Get the currently loaded bitmap from the DC.
+ HDC hdc = NULL;
+ HRESULT hr = surface->GetDC(&hdc);
+ RETURN_ON_HR_FAILURE(hr, "Failed to get HDC from surface", false);
+
+ HBITMAP bitmap =
+ reinterpret_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP));
+ if (!bitmap) {
+ NOTREACHED() << "Failed to get bitmap from DC";
+ surface->ReleaseDC(hdc);
+ return false;
+ }
+ // TODO(ananta)
+ // The code below may not be necessary once we have an ANGLE extension which
+  // allows us to pass the Direct3D surface directly for rendering.
+  // The device-dependent bitmap is upside down for OpenGL. We convert the
+  // bitmap to a DIB and render it on the texture instead.
+ BITMAP bitmap_basic_info = {0};
+ if (!GetObject(bitmap, sizeof(BITMAP), &bitmap_basic_info)) {
+ NOTREACHED() << "Failed to read bitmap info";
+ surface->ReleaseDC(hdc);
+ return false;
+ }
+ BITMAPINFO bitmap_info = {0};
+ bitmap_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
+ bitmap_info.bmiHeader.biWidth = bitmap_basic_info.bmWidth;
+ bitmap_info.bmiHeader.biHeight = bitmap_basic_info.bmHeight;
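+  // Note: a positive biHeight requests a bottom-up DIB, which matches the
+  // bottom-up texture origin OpenGL expects.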
+ bitmap_info.bmiHeader.biPlanes = 1;
+ bitmap_info.bmiHeader.biBitCount = bitmap_basic_info.bmBitsPixel;
+ bitmap_info.bmiHeader.biCompression = BI_RGB;
+ bitmap_info.bmiHeader.biSizeImage = 0;
+ bitmap_info.bmiHeader.biClrUsed = 0;
+
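+  // With a NULL buffer and zero scan lines, GetDIBits only fills in
+  // |bitmap_info|, in particular biSizeImage.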
+ int ret = GetDIBits(hdc, bitmap, 0, 0, NULL, &bitmap_info, DIB_RGB_COLORS);
+ if (!ret || bitmap_info.bmiHeader.biSizeImage <= 0) {
+ NOTREACHED() << "Failed to read bitmap size";
+ surface->ReleaseDC(hdc);
+ return false;
+ }
+
+ bits->reset(new char[bitmap_info.bmiHeader.biSizeImage]);
+ ret = GetDIBits(hdc, bitmap, 0, bitmap_basic_info.bmHeight, bits->get(),
+ &bitmap_info, DIB_RGB_COLORS);
+ if (!ret) {
+ NOTREACHED() << "Failed to retrieve bitmap bits.";
+ }
+ surface->ReleaseDC(hdc);
+ return !!ret;
+}
+
+void DXVAVideoDecodeAccelerator::Invalidate() {
+ if (state_ == kUninitialized)
+ return;
+ ClearState();
+ decoder_.Release();
+ MFShutdown();
+ state_ = kUninitialized;
+}
+
+void DXVAVideoDecodeAccelerator::NotifyInitializeDone() {
+ if (client_)
+ client_->NotifyInitializeDone();
+}
+
+void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) {
+ if (client_)
+ client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
+}
+
+void DXVAVideoDecodeAccelerator::NotifyFlushDone() {
+ if (client_)
+ client_->NotifyFlushDone();
+}
+
+void DXVAVideoDecodeAccelerator::NotifyResetDone() {
+ if (client_)
+ client_->NotifyResetDone();
+}
+
+void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
+ // This task could execute after the decoder has been torn down.
+ if (state_ != kUninitialized && client_) {
+ client_->ProvidePictureBuffers(kNumPictureBuffers,
+ gfx::Size(width, height));
+ }
+}
+
+void DXVAVideoDecodeAccelerator::NotifyPictureReady(
+ const media::Picture& picture) {
+ // This task could execute after the decoder has been torn down.
+ if (state_ != kUninitialized && client_)
+ client_->PictureReady(picture);
+}
+
diff --git a/content/common/gpu/media/dxva_video_decode_accelerator.h b/content/common/gpu/media/dxva_video_decode_accelerator.h
new file mode 100644
index 0000000..0e0b0c4
--- /dev/null
+++ b/content/common/gpu/media/dxva_video_decode_accelerator.h
@@ -0,0 +1,221 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_MEDIA_DXVA_VIDEO_DECODE_ACCELERATOR_H_
+#define CONTENT_COMMON_GPU_MEDIA_DXVA_VIDEO_DECODE_ACCELERATOR_H_
+
+#include <d3d9.h>
+#include <dxva2api.h>
+#include <mfidl.h>
+
+#include <list>
+#include <map>
+#include <vector>
+
+#include "base/threading/non_thread_safe.h"
+#include "base/win/scoped_comptr.h"
+#include "media/video/video_decode_accelerator.h"
+#include "third_party/angle/include/EGL/egl.h"
+#include "third_party/angle/include/EGL/eglext.h"
+
+interface IMFSample;
+interface IDirect3DSurface9;
+
+// Class to provide a DXVA 2.0 based accelerator using the Microsoft Media
+// Foundation APIs via the VideoDecodeAccelerator interface.
+// This class lives on a single thread and DCHECKs that it is never accessed
+// from any other.
+class DXVAVideoDecodeAccelerator : public media::VideoDecodeAccelerator,
+ public base::NonThreadSafe {
+ public:
+ enum State {
+    kUninitialized,  // Uninitialized.
+    kNormal,         // Normal playing state.
+    kResetting,      // Reset() received; before NotifyResetDone().
+    kEosDrain,       // Input EOS received.
+    kStopped,        // Output EOS received.
+ };
+
+ // Does not take ownership of |client| which must outlive |*this|.
+ DXVAVideoDecodeAccelerator(
+ media::VideoDecodeAccelerator::Client* client,
+ base::ProcessHandle renderer_process);
+ virtual ~DXVAVideoDecodeAccelerator();
+
+ // media::VideoDecodeAccelerator implementation.
+ virtual bool Initialize(Profile) OVERRIDE;
+ virtual void Decode(const media::BitstreamBuffer& bitstream_buffer) OVERRIDE;
+ virtual void AssignPictureBuffers(
+ const std::vector<media::PictureBuffer>& buffers) OVERRIDE;
+ virtual void ReusePictureBuffer(int32 picture_buffer_id) OVERRIDE;
+ virtual void Flush() OVERRIDE;
+ virtual void Reset() OVERRIDE;
+ virtual void Destroy() OVERRIDE;
+
+  // Initialization work needed before the process is sandboxed.
+  // This includes:
+  // 1. Loading the dlls (mf, mfplat, d3d9, etc.) required for decoding.
+  // 2. Setting up the device manager instance which is shared between all
+  //    decoder instances.
+ static void PreSandboxInitialization();
+
+ private:
+ // Creates and initializes an instance of the D3D device and the
+ // corresponding device manager. The device manager instance is eventually
+ // passed to the IMFTransform interface implemented by the h.264 decoder.
+ static bool CreateD3DDevManager();
+
+ // Creates, initializes and sets the media types for the h.264 decoder.
+ bool InitDecoder();
+
+ // Validates whether the h.264 decoder supports hardware video acceleration.
+ bool CheckDecoderDxvaSupport();
+
+ // Returns information about the input and output streams. This includes
+ // alignment information, decoder support flags, minimum sample size, etc.
+ bool GetStreamsInfoAndBufferReqs();
+
+ // Registers the input and output media types on the h.264 decoder. This
+ // includes the expected input and output formats.
+ bool SetDecoderMediaTypes();
+
+ // Registers the input media type for the h.264 decoder.
+ bool SetDecoderInputMediaType();
+
+ // Registers the output media type for the h.264 decoder.
+ bool SetDecoderOutputMediaType(const GUID& subtype);
+
+ // Passes a command message to the decoder. This includes commands like
+ // start of stream, end of stream, flush, drain the decoder, etc.
+ bool SendMFTMessage(MFT_MESSAGE_TYPE msg, int32 param);
+
+ // The bulk of the decoding happens here. This function handles errors,
+ // format changes and processes decoded output.
+ void DoDecode();
+
+ // Invoked when we have a valid decoded output sample. Retrieves the D3D
+ // surface and maintains a copy of it which is passed eventually to the
+ // client when we have a picture buffer to copy the surface contents to.
+ bool ProcessOutputSample(IMFSample* sample);
+
+ // Copies the output sample data to the picture buffer provided by the
+ // client.
+ bool CopyOutputSampleDataToPictureBuffer(IDirect3DSurface9* dest_surface,
+ media::PictureBuffer picture_buffer,
+ int32 input_buffer_id);
+
+ // Processes pending output samples by copying them to available picture
+ // slots.
+ void ProcessPendingSamples();
+
+ // Clears local state maintained by the decoder.
+ void ClearState();
+
+ // Helper function to notify the accelerator client about the error.
+ void StopOnError(media::VideoDecodeAccelerator::Error error);
+
+ // Transitions the decoder to the uninitialized state. The decoder will stop
+ // accepting requests in this state.
+ void Invalidate();
+
+ // Helper function to read the bitmap from the D3D surface passed in.
+ bool GetBitmapFromSurface(IDirect3DSurface9* surface,
+ scoped_array<char>* bits);
+
+  // Notifies the client that the input buffer identified by |input_buffer_id|
+  // has been processed.
+ void NotifyInputBufferRead(int input_buffer_id);
+
+ // Notifies the client that initialize was completed.
+ void NotifyInitializeDone();
+
+ // Notifies the client that the decoder was flushed.
+ void NotifyFlushDone();
+
+ // Notifies the client that the decoder was reset.
+ void NotifyResetDone();
+
+ // Requests picture buffers from the client.
+ void RequestPictureBuffers(int width, int height);
+
+ // Notifies the client about the availability of a picture.
+ void NotifyPictureReady(const media::Picture& picture);
+
+ // To expose client callbacks from VideoDecodeAccelerator.
+ media::VideoDecodeAccelerator::Client* client_;
+
+ base::win::ScopedComPtr<IMFTransform> decoder_;
+
+ // These interface pointers are initialized before the process is sandboxed.
+ // They are not released when the GPU process exits. This is ok for now
+ // because the GPU process does not exit normally on Windows. It is always
+ // terminated. The device manager instance is shared among all decoder
+ // instances. This is OK because there is internal locking performed by the
+ // device manager.
+ static IDirect3DDeviceManager9* device_manager_;
+ static IDirect3DDevice9Ex* device_;
+
+ // Current state of the decoder.
+ State state_;
+
+ MFT_INPUT_STREAM_INFO input_stream_info_;
+ MFT_OUTPUT_STREAM_INFO output_stream_info_;
+
+ // Contains information about a decoded sample.
+ struct PendingSampleInfo {
+ PendingSampleInfo(int32 buffer_id, IDirect3DSurface9* surface);
+ ~PendingSampleInfo();
+
+ int32 input_buffer_id;
+ base::win::ScopedComPtr<IDirect3DSurface9> dest_surface;
+ };
+
+ typedef std::list<PendingSampleInfo> PendingOutputSamples;
+
+ // List of decoded output samples.
+ PendingOutputSamples pending_output_samples_;
+
+ // Maintains information about a DXVA picture buffer, i.e. whether it is
+ // available for rendering, the texture information, etc.
+ struct DXVAPictureBuffer {
+ explicit DXVAPictureBuffer(const media::PictureBuffer& buffer);
+
+ bool available;
+ media::PictureBuffer picture_buffer;
+ };
+
+  // This map maintains the picture buffers passed by the client for decoding.
+  // The key is the picture buffer id.
+ typedef std::map<int32, DXVAPictureBuffer> OutputBuffers;
+ OutputBuffers output_picture_buffers_;
+
+ // Set to true if we requested picture slots from the client.
+ bool pictures_requested_;
+
+ // Contains the id of the last input buffer received from the client.
+ int32 last_input_buffer_id_;
+
+ // Handle to the renderer process.
+ base::ProcessHandle renderer_process_;
+
+ // Ideally the reset token would be a stack variable which is used while
+ // creating the device manager. However it seems that the device manager
+ // holds onto the token and attempts to access it if the underlying device
+ // changes.
+ // TODO(ananta): This needs to be verified.
+ static uint32 dev_manager_reset_token_;
+
+ // Counter which holds the number of input packets before a successful
+ // decode.
+ int inputs_before_decode_;
+
+ // Set to true if all necessary initialization needed before the GPU process
+ // is sandboxed is done.
+ // This includes the following:
+ // 1. All required decoder dlls were successfully loaded.
+ // 2. The device manager initialization completed.
+ static bool pre_sandbox_init_done_;
+};
+
+#endif // CONTENT_COMMON_GPU_MEDIA_DXVA_VIDEO_DECODE_ACCELERATOR_H_
+
diff --git a/content/common/gpu/media/gpu_video_decode_accelerator.cc b/content/common/gpu/media/gpu_video_decode_accelerator.cc
index 1014997..9dbd2e2 100644
--- a/content/common/gpu/media/gpu_video_decode_accelerator.cc
+++ b/content/common/gpu/media/gpu_video_decode_accelerator.cc
@@ -9,17 +9,28 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/stl_util.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_version.h"
+#endif // OS_WIN
+
#include "gpu/command_buffer/common/command_buffer.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_messages.h"
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
+
+#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)) || defined(OS_WIN)
+#if defined(OS_WIN)
+#include "content/common/gpu/media/dxva_video_decode_accelerator.h"
+#else // OS_WIN
#include "content/common/gpu/media/omx_video_decode_accelerator.h"
+#endif // OS_WIN
#include "ui/gfx/gl/gl_context.h"
#include "ui/gfx/gl/gl_surface_egl.h"
#endif
+
#include "gpu/command_buffer/service/texture_manager.h"
#include "ui/gfx/size.h"
@@ -109,18 +120,32 @@ void GpuVideoDecodeAccelerator::NotifyError(
void GpuVideoDecodeAccelerator::Initialize(
const media::VideoDecodeAccelerator::Profile profile,
- IPC::Message* init_done_msg) {
+ IPC::Message* init_done_msg,
+ base::ProcessHandle renderer_process) {
DCHECK(!video_decode_accelerator_.get());
DCHECK(!init_done_msg_);
DCHECK(init_done_msg);
init_done_msg_ = init_done_msg;
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
+
+#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)) || defined(OS_WIN)
DCHECK(stub_ && stub_->decoder());
- OmxVideoDecodeAccelerator* omx_decoder = new OmxVideoDecodeAccelerator(this);
- omx_decoder->SetEglState(
+#if defined(OS_WIN)
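+  // The Microsoft H.264 decoder MFT (msmpeg2vdec.dll) only ships with
+  // Windows 7 and later, hence the version check.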
+ if (base::win::GetVersion() < base::win::VERSION_WIN7) {
+ NOTIMPLEMENTED() << "HW video decode acceleration not available.";
+ NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
+ return;
+ }
+ DLOG(INFO) << "Initializing DXVA HW decoder for windows.";
+ DXVAVideoDecodeAccelerator* video_decoder =
+ new DXVAVideoDecodeAccelerator(this, renderer_process);
+#else // OS_WIN
+ OmxVideoDecodeAccelerator* video_decoder =
+ new OmxVideoDecodeAccelerator(this);
+ video_decoder->SetEglState(
gfx::GLSurfaceEGL::GetHardwareDisplay(),
stub_->decoder()->GetGLContext()->GetHandle());
- video_decode_accelerator_ = omx_decoder;
+#endif // OS_WIN
+ video_decode_accelerator_ = video_decoder;
video_decode_accelerator_->Initialize(profile);
#else // Update RenderViewImpl::createMediaPlayer when adding clauses.
NOTIMPLEMENTED() << "HW video decode acceleration not available.";
diff --git a/content/common/gpu/media/gpu_video_decode_accelerator.h b/content/common/gpu/media/gpu_video_decode_accelerator.h
index 3acf3f6..aa9f726 100644
--- a/content/common/gpu/media/gpu_video_decode_accelerator.h
+++ b/content/common/gpu/media/gpu_video_decode_accelerator.h
@@ -45,8 +45,11 @@ class GpuVideoDecodeAccelerator
// Initialize the accelerator with the given profile and send the
// |init_done_msg| when done.
+ // The renderer process handle is valid as long as we have a channel between
+ // GPU process and the renderer.
void Initialize(const media::VideoDecodeAccelerator::Profile profile,
- IPC::Message* init_done_msg);
+ IPC::Message* init_done_msg,
+ base::ProcessHandle renderer_process);
private:
diff --git a/content/common/gpu/media/omx_video_decode_accelerator_unittest.cc b/content/common/gpu/media/video_decode_accelerator_unittest.cc
index 727b7fe..f6bebc7 100644
--- a/content/common/gpu/media/omx_video_decode_accelerator_unittest.cc
+++ b/content/common/gpu/media/video_decode_accelerator_unittest.cc
@@ -28,6 +28,7 @@
#include "base/bind.h"
#include "base/command_line.h"
#include "base/file_util.h"
+#include "base/process_util.h"
#include "base/stl_util.h"
#include "base/string_number_conversions.h"
#include "base/string_split.h"
@@ -36,14 +37,21 @@
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
+#include "base/utf_string_conversions.h"
+
+#if (!defined(OS_CHROMEOS) || !defined(ARCH_CPU_ARMEL)) && !defined(OS_WIN)
+#error The VideoAccelerator tests are only supported on cros/ARM/Windows.
+#endif
+
+#if defined(OS_WIN)
+#include "content/common/gpu/media/dxva_video_decode_accelerator.h"
+#else // OS_WIN
#include "content/common/gpu/media/omx_video_decode_accelerator.h"
+#endif // defined(OS_WIN)
+
#include "third_party/angle/include/EGL/egl.h"
#include "third_party/angle/include/GLES2/gl2.h"
-#if !defined(OS_CHROMEOS) || !defined(ARCH_CPU_ARMEL)
-#error This test (and OmxVideoDecodeAccelerator) are only supported on cros/ARM!
-#endif
-
using media::VideoDecodeAccelerator;
namespace {
@@ -61,20 +69,21 @@ namespace {
// (the latter tests just decode speed).
// - |profile| is the media::H264Profile set during Initialization.
// An empty value for a numeric field means "ignore".
-const char* test_video_data = "test-25fps.h264:320:240:250:258:50:175:1";
+const FilePath::CharType* test_video_data =
+ FILE_PATH_LITERAL("test-25fps.h264:320:240:250:258:50:175:1");
// Parse |data| into its constituent parts and set the various output fields
// accordingly. CHECK-fails on unexpected or missing required data.
// Unspecified optional fields are set to -1.
-void ParseTestVideoData(std::string data,
- std::string* file_name,
+void ParseTestVideoData(FilePath::StringType data,
+ FilePath::StringType* file_name,
int* width, int* height,
int* num_frames,
int* num_NALUs,
int* min_fps_render,
int* min_fps_no_render,
int* profile) {
- std::vector<std::string> elements;
+ std::vector<FilePath::StringType> elements;
base::SplitString(data, ':', &elements);
CHECK_GE(elements.size(), 1U) << data;
CHECK_LE(elements.size(), 8U) << data;
@@ -98,17 +107,12 @@ void ParseTestVideoData(std::string data,
CHECK(base::StringToInt(elements[7], profile));
}
-
-// Helper for managing X11, EGL, and GLES2 resources. Xlib is not thread-safe,
-// and GL state is thread-specific, so all the methods of this class (except for
-// ctor/dtor) ensure they're being run on a single thread.
-//
-// TODO(fischman): consider moving this into media/ if we can de-dup some of the
-// code that ends up getting copy/pasted all over the place (esp. the GL setup
-// code).
+// Provides functionality for managing EGL, GLES2 and UI resources.
+// This class is not thread safe and thus all the methods of this class
+// (except for ctor/dtor) ensure they're being run on a single thread.
class RenderingHelper {
public:
- explicit RenderingHelper();
+ RenderingHelper();
~RenderingHelper();
// Initialize all structures to prepare to render to one or more windows of
@@ -119,8 +123,8 @@ class RenderingHelper {
// then all the usual work is done, except for the final swap of the EGL
// surface to the display. This cuts test times over 50% so is worth doing
// when testing non-rendering-related aspects.
- void Initialize(bool suppress_swap_to_display, int num_windows,
- int width, int height, base::WaitableEvent* done);
+ void Initialize(bool suppress_swap_to_display, int num_windows, int width,
+ int height, base::WaitableEvent* done);
// Undo the effects of Initialize() and signal |*done|.
void UnInitialize(base::WaitableEvent* done);
@@ -136,26 +140,43 @@ class RenderingHelper {
// Delete |texture_id|.
void DeleteTexture(GLuint texture_id);
+ // Platform specific Init/Uninit.
+ void PlatformInitialize();
+ void PlatformUnInitialize();
+
+ // Platform specific window creation.
+ EGLNativeWindowType PlatformCreateWindow(int top_left_x, int top_left_y);
+
+ // Platform specific display surface returned here.
+ EGLDisplay PlatformGetDisplay();
+
EGLDisplay egl_display() { return egl_display_; }
+
EGLContext egl_context() { return egl_context_; }
+
MessageLoop* message_loop() { return message_loop_; }
- private:
- // Zero-out internal state. Helper for ctor & UnInitialize().
+ protected:
void Clear();
- bool suppress_swap_to_display_;
+ // We ensure all operations are carried out on the same thread by remembering
+ // where we were Initialized.
+ MessageLoop* message_loop_;
int width_;
int height_;
- Display* x_display_;
- std::vector<Window> x_windows_;
+ bool suppress_swap_to_display_;
+
EGLDisplay egl_display_;
EGLContext egl_context_;
std::vector<EGLSurface> egl_surfaces_;
std::map<GLuint, int> texture_id_to_surface_index_;
- // We ensure all operations are carried out on the same thread by remembering
- // where we were Initialized.
- MessageLoop* message_loop_;
+
+#if defined(OS_WIN)
+ std::vector<HWND> windows_;
+#else // OS_WIN
+ Display* x_display_;
+ std::vector<Window> x_windows_;
+#endif // OS_WIN
};
RenderingHelper::RenderingHelper() {
@@ -164,19 +185,7 @@ RenderingHelper::RenderingHelper() {
RenderingHelper::~RenderingHelper() {
CHECK_EQ(width_, 0) << "Must call UnInitialize before dtor.";
-}
-
-void RenderingHelper::Clear() {
- suppress_swap_to_display_ = false;
- width_ = 0;
- height_ = 0;
- x_display_ = NULL;
- x_windows_.clear();
- egl_display_ = EGL_NO_DISPLAY;
- egl_context_ = EGL_NO_CONTEXT;
- egl_surfaces_.clear();
- texture_id_to_surface_index_.clear();
- message_loop_ = NULL;
+ Clear();
}
// Helper for Shader creation.
@@ -200,7 +209,8 @@ static void CreateShader(
void RenderingHelper::Initialize(
bool suppress_swap_to_display,
int num_windows,
- int width, int height,
+ int width,
+ int height,
base::WaitableEvent* done) {
// Use width_ != 0 as a proxy for the class having already been
// Initialize()'d, and UnInitialize() before continuing.
@@ -218,15 +228,10 @@ void RenderingHelper::Initialize(
message_loop_ = MessageLoop::current();
CHECK_GT(num_windows, 0);
- // Per-display X11 & EGL initialization.
- CHECK(x_display_ = XOpenDisplay(NULL));
- int depth = DefaultDepth(x_display_, DefaultScreen(x_display_));
- XSetWindowAttributes window_attributes;
- window_attributes.background_pixel =
- BlackPixel(x_display_, DefaultScreen(x_display_));
- window_attributes.override_redirect = true;
+ PlatformInitialize();
+
+ egl_display_ = PlatformGetDisplay();
- egl_display_ = eglGetDisplay(x_display_);
EGLint major;
EGLint minor;
CHECK(eglInitialize(egl_display_, &major, &minor)) << eglGetError();
@@ -253,28 +258,16 @@ void RenderingHelper::Initialize(
// Arrange X windows whimsically, with some padding.
int top_left_x = (width + 20) * (i % 4);
int top_left_y = (height + 12) * (i % 3);
- Window x_window = XCreateWindow(
- x_display_, DefaultRootWindow(x_display_),
- top_left_x, top_left_y, width_, height_,
- 0 /* border width */,
- depth, CopyFromParent /* class */, CopyFromParent /* visual */,
- (CWBackPixel | CWOverrideRedirect), &window_attributes);
- x_windows_.push_back(x_window);
- XStoreName(x_display_, x_window, "OmxVideoDecodeAcceleratorTest");
- XSelectInput(x_display_, x_window, ExposureMask);
- XMapWindow(x_display_, x_window);
+ EGLNativeWindowType window = PlatformCreateWindow(top_left_x, top_left_y);
EGLSurface egl_surface =
- eglCreateWindowSurface(egl_display_, egl_config, x_window, NULL);
+ eglCreateWindowSurface(egl_display_, egl_config, window, NULL);
egl_surfaces_.push_back(egl_surface);
CHECK_NE(egl_surface, EGL_NO_SURFACE);
}
CHECK(eglMakeCurrent(egl_display_, egl_surfaces_[0],
egl_surfaces_[0], egl_context_)) << eglGetError();
- // GLES2 initialization. Note: This is pretty much copy/pasted from
- // media/tools/player_x11/gles_video_renderer.cc, with some simplification
- // applied.
static const float kVertices[] =
{ -1.f, 1.f, -1.f, -1.f, 1.f, 1.f, 1.f, -1.f, };
static const float kTextureCoordsEgl[] = { 0, 1, 0, 0, 1, 1, 1, 0, };
@@ -319,28 +312,33 @@ void RenderingHelper::Initialize(
glEnableVertexAttribArray(tc_location);
glVertexAttribPointer(tc_location, 2, GL_FLOAT, GL_FALSE, 0,
kTextureCoordsEgl);
-
done->Signal();
}
void RenderingHelper::UnInitialize(base::WaitableEvent* done) {
CHECK_EQ(MessageLoop::current(), message_loop_);
- // Destroy resources acquired in Initialize, in reverse-acquisition order.
CHECK(eglMakeCurrent(egl_display_, EGL_NO_SURFACE, EGL_NO_SURFACE,
EGL_NO_CONTEXT)) << eglGetError();
CHECK(eglDestroyContext(egl_display_, egl_context_));
for (size_t i = 0; i < egl_surfaces_.size(); ++i)
CHECK(eglDestroySurface(egl_display_, egl_surfaces_[i]));
CHECK(eglTerminate(egl_display_));
- for (size_t i = 0; i < x_windows_.size(); ++i) {
- CHECK(XUnmapWindow(x_display_, x_windows_[i]));
- CHECK(XDestroyWindow(x_display_, x_windows_[i]));
- }
- // Mimic newly-created object.
Clear();
done->Signal();
}
+void RenderingHelper::Clear() {
+ suppress_swap_to_display_ = false;
+ width_ = 0;
+ height_ = 0;
+ texture_id_to_surface_index_.clear();
+ message_loop_ = NULL;
+ egl_display_ = EGL_NO_DISPLAY;
+ egl_context_ = EGL_NO_CONTEXT;
+ egl_surfaces_.clear();
+ PlatformUnInitialize();
+}
+
void RenderingHelper::CreateTexture(int window_id, GLuint* texture_id,
base::WaitableEvent* done) {
if (MessageLoop::current() != message_loop_) {
@@ -390,6 +388,76 @@ void RenderingHelper::DeleteTexture(GLuint texture_id) {
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
}
+#if defined(OS_WIN)
+void RenderingHelper::PlatformInitialize() {}
+
+void RenderingHelper::PlatformUnInitialize() {
+ for (size_t i = 0; i < windows_.size(); ++i) {
+ DestroyWindow(windows_[i]);
+ }
+ windows_.clear();
+}
+
+EGLNativeWindowType RenderingHelper::PlatformCreateWindow(
+ int top_left_x, int top_left_y) {
+ HWND window = CreateWindowEx(0, L"Static", L"VideoDecodeAcceleratorTest",
+ WS_OVERLAPPEDWINDOW | WS_VISIBLE, top_left_x,
+ top_left_y, width_, height_, NULL, NULL, NULL,
+ NULL);
+ CHECK(window != NULL);
+ windows_.push_back(window);
+ return window;
+}
+
+EGLDisplay RenderingHelper::PlatformGetDisplay() {
+ return eglGetDisplay(EGL_DEFAULT_DISPLAY);
+}
+
+#else // OS_WIN
+
+void RenderingHelper::PlatformInitialize() {
+ CHECK(x_display_ = XOpenDisplay(NULL));
+}
+
+void RenderingHelper::PlatformUnInitialize() {
+ // Destroy resources acquired in Initialize, in reverse-acquisition order.
+ for (size_t i = 0; i < x_windows_.size(); ++i) {
+ CHECK(XUnmapWindow(x_display_, x_windows_[i]));
+ CHECK(XDestroyWindow(x_display_, x_windows_[i]));
+ }
+ // Mimic newly created object.
+ x_display_ = NULL;
+ x_windows_.clear();
+}
+
+EGLDisplay RenderingHelper::PlatformGetDisplay() {
+ return eglGetDisplay(x_display_);
+}
+
+EGLNativeWindowType RenderingHelper::PlatformCreateWindow(int top_left_x,
+ int top_left_y) {
+ int depth = DefaultDepth(x_display_, DefaultScreen(x_display_));
+
+ XSetWindowAttributes window_attributes;
+ window_attributes.background_pixel =
+ BlackPixel(x_display_, DefaultScreen(x_display_));
+ window_attributes.override_redirect = true;
+
+ Window x_window = XCreateWindow(
+ x_display_, DefaultRootWindow(x_display_),
+ top_left_x, top_left_y, width_, height_,
+ 0 /* border width */,
+ depth, CopyFromParent /* class */, CopyFromParent /* visual */,
+ (CWBackPixel | CWOverrideRedirect), &window_attributes);
+ x_windows_.push_back(x_window);
+ XStoreName(x_display_, x_window, "VideoDecodeAcceleratorTest");
+ XSelectInput(x_display_, x_window, ExposureMask);
+ XMapWindow(x_display_, x_window);
+ return x_window;
+}
+
+#endif // OS_WIN
+
// State of the EglRenderingVDAClient below. Order matters here as the test
// makes assumptions about it.
enum ClientState {
@@ -523,7 +591,7 @@ class EglRenderingVDAClient : public VideoDecodeAccelerator::Client {
size_t encoded_data_next_pos_to_decode_;
int next_bitstream_buffer_id_;
ClientStateNotification* note_;
- scoped_refptr<OmxVideoDecodeAccelerator> decoder_;
+ scoped_refptr<VideoDecodeAccelerator> decoder_;
std::set<int> outstanding_texture_ids_;
int reset_after_frame_num_;
int delete_decoder_state_;
@@ -569,8 +637,15 @@ EglRenderingVDAClient::~EglRenderingVDAClient() {
void EglRenderingVDAClient::CreateDecoder() {
CHECK(decoder_deleted());
- decoder_ = new OmxVideoDecodeAccelerator(this);
- decoder_->SetEglState(egl_display(), egl_context());
+#if defined(OS_WIN)
+ scoped_refptr<DXVAVideoDecodeAccelerator> decoder =
+ new DXVAVideoDecodeAccelerator(this, base::GetCurrentProcessHandle());
+#else // OS_WIN
+ scoped_refptr<OmxVideoDecodeAccelerator> decoder =
+ new OmxVideoDecodeAccelerator(this);
+ decoder->SetEglState(egl_display(), egl_context());
+#endif // OS_WIN
+ decoder_ = decoder.release();
SetState(CS_DECODER_SET);
if (decoder_deleted())
return;
@@ -785,7 +860,7 @@ double EglRenderingVDAClient::frames_per_second() {
// - Number of concurrent in-flight Decode() calls per decoder.
// - reset_after_frame_num: see EglRenderingVDAClient ctor.
// - delete_decoder_phase: see EglRenderingVDAClient ctor.
-class OmxVideoDecodeAcceleratorTest
+class VideoDecodeAcceleratorTest
: public ::testing::TestWithParam<
Tuple5<int, int, int, ResetPoint, ClientState> > {
};
@@ -809,7 +884,7 @@ enum { kMinSupportedNumConcurrentDecoders = 3 };
// Test the most straightforward case possible: data is decoded from a single
// chunk and rendered to the screen.
-TEST_P(OmxVideoDecodeAcceleratorTest, TestSimpleDecode) {
+TEST_P(VideoDecodeAcceleratorTest, TestSimpleDecode) {
// Can be useful for debugging VLOGs from OVDA.
// logging::SetMinLogLevel(-1);
@@ -822,7 +897,7 @@ TEST_P(OmxVideoDecodeAcceleratorTest, TestSimpleDecode) {
const int reset_after_frame_num = GetParam().d;
const int delete_decoder_state = GetParam().e;
- std::string test_video_file;
+ FilePath::StringType test_video_file;
int frame_width, frame_height;
int num_frames, num_NALUs, min_fps_render, min_fps_no_render, profile;
ParseTestVideoData(test_video_data, &test_video_file, &frame_width,
@@ -849,7 +924,15 @@ TEST_P(OmxVideoDecodeAcceleratorTest, TestSimpleDecode) {
// Initialize the rendering helper.
base::Thread rendering_thread("EglRenderingVDAClientThread");
- rendering_thread.Start();
+ base::Thread::Options options;
+ options.message_loop_type = MessageLoop::TYPE_DEFAULT;
+#if defined(OS_WIN)
+  // On Windows the decoding thread initializes the Media Foundation decoder,
+  // which uses COM. We need the thread to be a UI thread.
+ options.message_loop_type = MessageLoop::TYPE_UI;
+#endif // OS_WIN
+
+ rendering_thread.StartWithOptions(options);
RenderingHelper rendering_helper;
base::WaitableEvent done(false, false);
@@ -889,7 +972,8 @@ TEST_P(OmxVideoDecodeAcceleratorTest, TestSimpleDecode) {
// We expect initialization to fail only when more than the supported
// number of decoders is instantiated. Assert here that something else
// didn't trigger failure.
- ASSERT_GT(num_concurrent_decoders, kMinSupportedNumConcurrentDecoders);
+ ASSERT_GT(num_concurrent_decoders,
+ static_cast<size_t>(kMinSupportedNumConcurrentDecoders));
continue;
}
ASSERT_EQ(state, CS_INITIALIZED);
@@ -947,14 +1031,14 @@ TEST_P(OmxVideoDecodeAcceleratorTest, TestSimpleDecode) {
// Test that Reset() mid-stream works fine and doesn't affect decoding even when
// Decode() calls are made during the reset.
INSTANTIATE_TEST_CASE_P(
- MidStreamReset, OmxVideoDecodeAcceleratorTest,
+ MidStreamReset, VideoDecodeAcceleratorTest,
::testing::Values(
MakeTuple(1, 1, 1, static_cast<ResetPoint>(100), CS_RESET)));
// Test that Destroy() mid-stream works fine (primarily this is testing that no
// crashes occur).
INSTANTIATE_TEST_CASE_P(
- TearDownTiming, OmxVideoDecodeAcceleratorTest,
+ TearDownTiming, VideoDecodeAcceleratorTest,
::testing::Values(
MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_DECODER_SET),
MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_INITIALIZED),
@@ -970,7 +1054,7 @@ INSTANTIATE_TEST_CASE_P(
// Test that decoding various variation works: multiple concurrent decoders and
// multiple NALUs per Decode() call.
INSTANTIATE_TEST_CASE_P(
- DecodeVariations, OmxVideoDecodeAcceleratorTest,
+ DecodeVariations, VideoDecodeAcceleratorTest,
::testing::Values(
MakeTuple(1, 1, 1, END_OF_STREAM_RESET, CS_RESET),
MakeTuple(1, 1, 10, END_OF_STREAM_RESET, CS_RESET),
@@ -991,7 +1075,7 @@ INSTANTIATE_TEST_CASE_P(
// Find out how many concurrent decoders can go before we exhaust system
// resources.
INSTANTIATE_TEST_CASE_P(
- ResourceExhaustion, OmxVideoDecodeAcceleratorTest,
+ ResourceExhaustion, VideoDecodeAcceleratorTest,
::testing::Values(
// +0 hack below to promote enum to int.
MakeTuple(1, kMinSupportedNumConcurrentDecoders + 0, 1,
@@ -1009,16 +1093,21 @@ INSTANTIATE_TEST_CASE_P(
int main(int argc, char **argv) {
testing::InitGoogleTest(&argc, argv); // Removes gtest-specific args.
- CommandLine cmd_line(argc, argv); // Must run after InitGoogleTest.
- CommandLine::SwitchMap switches = cmd_line.GetSwitches();
+ CommandLine::Init(argc, argv);
+
+ CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ DCHECK(cmd_line);
+
+ CommandLine::SwitchMap switches = cmd_line->GetSwitches();
for (CommandLine::SwitchMap::const_iterator it = switches.begin();
it != switches.end(); ++it) {
if (it->first == "test_video_data") {
test_video_data = it->second.c_str();
continue;
}
LOG(FATAL) << "Unexpected switch: " << it->first << ":" << it->second;
}
-
+#if defined(OS_WIN)
+ DXVAVideoDecodeAccelerator::PreSandboxInitialization();
+#endif // OS_WIN
return RUN_ALL_TESTS();
}