author     imcheng@chromium.org <imcheng@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-09-17 23:15:35 +0000
committer  imcheng@chromium.org <imcheng@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-09-17 23:15:35 +0000
commit     3281078284ccc8fd196c1cf4cc29fac63b6485ee (patch)
tree       0b9ee7466c40787e6873fc1119e9041ee78bc8cf /media
parent     3598c6021c4a79bc954153c6f02140826229e254 (diff)
download   chromium_src-3281078284ccc8fd196c1cf4cc29fac63b6485ee.zip
           chromium_src-3281078284ccc8fd196c1cf4cc29fac63b6485ee.tar.gz
           chromium_src-3281078284ccc8fd196c1cf4cc29fac63b6485ee.tar.bz2
Video decode context for MFT H264 decode engine.

The context will be constructed before the creation of the decode engine and
it will be passed into the decode engine's initialization. This context
manages D3DTextures which will be read by the renderer. The decode engine asks
the context to upload its decoded frames (D3DSurfaces) for it. Waiting for the
VideoDecodeContext API to be finalized.

BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3431009

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@59874 0039d316-1c4b-4281-b951-d872f2087c98
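A minimal setup sketch of the construction order described above, not part of this patch: SetUpDecodeContext() and OnContextInitialized() are illustrative names, and the hand-off of the context to MftH264DecodeEngine is left as a comment because the VideoDecodeContext API is still being finalized. Only the MftH264DecodeEngineContext calls mirror the interface added here; note that Initialize() in this change runs synchronously and invokes the task on completion.

// Usage sketch (not part of this patch). The helper and callback names are
// illustrative assumptions; the context methods are the ones added below.
#include <windows.h>
#include <d3d9.h>

#include "base/logging.h"
#include "base/task.h"
#include "media/video/mft_h264_decode_engine_context.h"

namespace media {

static void OnContextInitialized() {}

// Constructs the context before the decode engine is created. Initialize()
// creates the IDirect3D9 object and a hardware device bound to
// |device_window|; the engine would later receive this context during its
// own initialization.
MftH264DecodeEngineContext* SetUpDecodeContext(HWND device_window) {
  MftH264DecodeEngineContext* context =
      new MftH264DecodeEngineContext(device_window);
  context->Initialize(NewRunnableFunction(&OnContextInitialized));
  if (!context->initialized()) {
    LOG(ERROR) << "Decode engine context failed to initialize";
    delete context;
    return NULL;
  }
  // GetDevice() exposes the raw IDirect3DDevice9* that the DXVA-enabled MFT
  // needs for its device manager.
  IDirect3DDevice9* device =
      static_cast<IDirect3DDevice9*>(context->GetDevice());
  DCHECK(device);
  // Pass |context| to the decode engine's initialization once the
  // VideoDecodeContext API lands.
  return context;
}

}  // namespace media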
Diffstat (limited to 'media')
-rw-r--r--  media/video/mft_h264_decode_engine_context.cc  179
-rw-r--r--  media/video/mft_h264_decode_engine_context.h     70
2 files changed, 249 insertions(+), 0 deletions(-)
diff --git a/media/video/mft_h264_decode_engine_context.cc b/media/video/mft_h264_decode_engine_context.cc
new file mode 100644
index 0000000..1759ced
--- /dev/null
+++ b/media/video/mft_h264_decode_engine_context.cc
@@ -0,0 +1,179 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/mft_h264_decode_engine_context.h"
+
+#include <algorithm>
+#include <vector>
+
+#include <d3d9.h>
+
+#include "base/task.h"
+#include "media/base/callback.h"
+
+#pragma comment(lib, "dxva2.lib")
+#pragma comment(lib, "d3d9.lib")
+
+using base::TimeDelta;
+
+namespace media {
+
+static D3DFORMAT VideoFrameToD3DFormat(VideoFrame::Format format) {
+ switch (format) {
+ case VideoFrame::RGB555:
+ return D3DFMT_X1R5G5B5;
+ case VideoFrame::RGB565:
+ return D3DFMT_R5G6B5;
+ case VideoFrame::RGB32:
+ return D3DFMT_X8R8G8B8;
+ case VideoFrame::RGBA:
+ return D3DFMT_A8R8G8B8;
+ default:
+ // Note that although there is a corresponding type for VideoFrame::RGB24
+ // (D3DFMT_R8G8B8), it is not supported by render targets.
+ NOTREACHED() << "Unsupported format";
+ return D3DFMT_UNKNOWN;
+ }
+}
+
+static IDirect3DTexture9* GetTexture(scoped_refptr<VideoFrame> frame) {
+ return static_cast<IDirect3DTexture9*>(frame->d3d_texture(0));
+}
+
+static void ReleaseTexture(scoped_refptr<VideoFrame> frame) {
+ GetTexture(frame)->Release();
+}
+
+static void ReleaseTextures(
+ const std::vector<scoped_refptr<VideoFrame> >& frames) {
+ std::for_each(frames.begin(), frames.end(), ReleaseTexture);
+}
+
+MftH264DecodeEngineContext::MftH264DecodeEngineContext(HWND device_window)
+ : initialized_(false),
+ device_window_(device_window),
+ d3d9_(NULL),
+ device_(NULL) {
+ DCHECK(device_window);
+}
+
+MftH264DecodeEngineContext::~MftH264DecodeEngineContext() {
+}
+
+// TODO(imcheng): This should set the success variable once the API is
+// finalized.
+void MftH264DecodeEngineContext::Initialize(Task* task) {
+ AutoTaskRunner runner(task);
+ if (initialized_)
+ return;
+ d3d9_ = Direct3DCreate9(D3D_SDK_VERSION);
+ if (!d3d9_) {
+ LOG(ERROR) << "Direct3DCreate9 failed";
+ return;
+ }
+
+ D3DPRESENT_PARAMETERS present_params = {0};
+ present_params.BackBufferWidth = 0;
+ present_params.BackBufferHeight = 0;
+ present_params.BackBufferFormat = D3DFMT_UNKNOWN;
+ present_params.BackBufferCount = 1;
+ present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
+ present_params.hDeviceWindow = device_window_;
+ present_params.Windowed = TRUE;
+ present_params.Flags = D3DPRESENTFLAG_VIDEO;
+ present_params.FullScreen_RefreshRateInHz = 0;
+ present_params.PresentationInterval = 0;
+
+ HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,
+ D3DDEVTYPE_HAL,
+ device_window_,
+ (D3DCREATE_HARDWARE_VERTEXPROCESSING |
+ D3DCREATE_MULTITHREADED),
+ &present_params,
+ device_.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "CreateDevice failed " << std::hex << hr;
+ return;
+ }
+ initialized_ = true;
+}
+
+void* MftH264DecodeEngineContext::GetDevice() {
+ return device_.get();
+}
+
+void MftH264DecodeEngineContext::AllocateVideoFrames(
+ int n, size_t width, size_t height, VideoFrame::Format format,
+ std::vector<scoped_refptr<VideoFrame> >* frames,
+ Task* task) {
+ DCHECK(initialized_);
+ DCHECK_GT(n, 0);
+ DCHECK(frames);
+
+ AutoTaskRunner runner(task);
+ D3DFORMAT d3d_format = VideoFrameToD3DFormat(format);
+ std::vector<scoped_refptr<VideoFrame> > temp_frames;
+ temp_frames.reserve(n);
+ HRESULT hr;
+ for (int i = 0; i < n; i++) {
+ IDirect3DTexture9* texture = NULL;
+ hr = device_->CreateTexture(width, height, 1, D3DUSAGE_RENDERTARGET,
+ d3d_format, D3DPOOL_DEFAULT, &texture, NULL);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "CreateTexture " << i << " failed " << std::hex << hr;
+ ReleaseTextures(temp_frames);
+ return;
+ }
+ VideoFrame::D3dTexture texture_array[VideoFrame::kMaxPlanes] =
+ { texture, texture, texture };
+ scoped_refptr<VideoFrame> texture_frame;
+ VideoFrame::CreateFrameD3dTexture(format, width, height, texture_array,
+ TimeDelta(), TimeDelta(), &texture_frame);
+ if (!texture_frame.get()) {
+ LOG(ERROR) << "CreateFrameD3dTexture " << i << " failed";
+ texture->Release();
+ ReleaseTextures(temp_frames);
+ return;
+ }
+ temp_frames.push_back(texture_frame);
+ }
+ frames->assign(temp_frames.begin(), temp_frames.end());
+ managed_frames_.insert(managed_frames_.end(),
+ temp_frames.begin(), temp_frames.end());
+}
+
+bool MftH264DecodeEngineContext::UploadToVideoFrame(
+ void* source, scoped_refptr<VideoFrame> frame) {
+ DCHECK(initialized_);
+ DCHECK(source);
+ DCHECK(frame.get());
+
+ IDirect3DSurface9* surface = static_cast<IDirect3DSurface9*>(source);
+ IDirect3DTexture9* texture = GetTexture(frame);
+ ScopedComPtr<IDirect3DSurface9> top_surface;
+ HRESULT hr;
+ hr = texture->GetSurfaceLevel(0, top_surface.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "GetSurfaceLevel failed " << std::hex << hr;
+ return false;
+ }
+ hr = device_->StretchRect(surface, NULL, top_surface.get(), NULL,
+ D3DTEXF_NONE);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "StretchRect failed " << std::hex << hr;
+ return false;
+ }
+ return true;
+}
+
+void MftH264DecodeEngineContext::ReleaseAllVideoFrames() {
+ ReleaseTextures(managed_frames_);
+ managed_frames_.clear();
+}
+
+void MftH264DecodeEngineContext::Destroy(Task* task) {
+ AutoTaskRunner runner(task);
+}
+
+} // namespace media
diff --git a/media/video/mft_h264_decode_engine_context.h b/media/video/mft_h264_decode_engine_context.h
new file mode 100644
index 0000000..d33f06c
--- /dev/null
+++ b/media/video/mft_h264_decode_engine_context.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Video decode context for MftH264DecodeEngine. This context manages
+// VideoFrame objects for the DXVA-enabled MFT H.264 decode engine, and
+// converts its output (which is IDirect3DSurface9) into IDirect3DTexture9
+// (wrapped in a VideoFrame object), which will be compatible with ANGLE.
+
+#ifndef MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_CONTEXT_H_
+#define MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_CONTEXT_H_
+
+#include <vector>
+
+#include "base/scoped_comptr_win.h"
+#include "media/base/video_frame.h"
+#include "media/video/video_decode_context.h"
+
+class Task;
+
+struct IDirect3D9;
+extern "C" const GUID IID_IDirect3D9;
+struct IDirect3DDevice9;
+extern "C" const GUID IID_IDirect3DDevice9;
+
+namespace media {
+
+// TODO(imcheng): Make it implement VideoDecodeContext once the API
+// is finalized.
+class MftH264DecodeEngineContext {
+ public:
+ // Constructs a MftH264DecodeEngineContext with the D3D device attached
+ // to |device_window|. This device does not own the window, so the caller
+ // must destroy the window explicitly after the destruction of this object.
+ explicit MftH264DecodeEngineContext(HWND device_window);
+ virtual ~MftH264DecodeEngineContext();
+
+ // TODO(imcheng): Is this a part of the API?
+ virtual void Initialize(Task* task);
+
+ // Gets the underlying IDirect3DDevice9.
+ virtual void* GetDevice();
+
+ // Allocates IDirect3DTexture9 objects wrapped in VideoFrame objects.
+ virtual void AllocateVideoFrames(
+ int n, size_t width, size_t height, VideoFrame::Format format,
+ std::vector<scoped_refptr<VideoFrame> >* frames,
+ Task* task);
+
+ // TODO(imcheng): Make this follow the API once it is finalized.
+ // Uploads the decoded frame (IDirect3DSurface9) to a VideoFrame allocated
+ // by AllocateVideoFrames().
+ virtual bool UploadToVideoFrame(void* source,
+ scoped_refptr<VideoFrame> frame);
+ virtual void ReleaseAllVideoFrames();
+ virtual void Destroy(Task* task);
+
+ bool initialized() const { return initialized_; }
+
+ private:
+ bool initialized_;
+ HWND device_window_;
+ std::vector<scoped_refptr<VideoFrame> > managed_frames_;
+ ScopedComPtr<IDirect3D9, &IID_IDirect3D9> d3d9_;
+ ScopedComPtr<IDirect3DDevice9, &IID_IDirect3DDevice9> device_;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_MFT_H264_DECODE_ENGINE_CONTEXT_H_
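
The header comment above describes the surface-to-texture conversion path; the sketch below shows how a caller might drive it. The function names, the frame count of 4, the 1280x720 dimensions, and the RGBA format choice are illustrative assumptions that do not appear in this patch; only the AllocateVideoFrames() and UploadToVideoFrame() calls match the interface added here.

// Hypothetical caller-side sketch: AllocateOutputFrames() pre-allocates
// render-target textures wrapped in VideoFrames, and UploadDecodedSample()
// copies a decoded IDirect3DSurface9 into one of them (UploadToVideoFrame()
// performs the transfer with StretchRect).
#include <d3d9.h>

#include <vector>

#include "base/task.h"
#include "media/base/video_frame.h"
#include "media/video/mft_h264_decode_engine_context.h"

namespace media {

static void OnFramesAllocated() {}

// The frame count and 1280x720 size are placeholder values; a real engine
// would use the decoded stream's dimensions.
void AllocateOutputFrames(MftH264DecodeEngineContext* context,
                          std::vector<scoped_refptr<VideoFrame> >* frames) {
  context->AllocateVideoFrames(4, 1280, 720, VideoFrame::RGBA, frames,
                               NewRunnableFunction(&OnFramesAllocated));
}

// Returns true if |decoded_surface| was copied into |frame|'s texture.
bool UploadDecodedSample(MftH264DecodeEngineContext* context,
                         IDirect3DSurface9* decoded_surface,
                         scoped_refptr<VideoFrame> frame) {
  return context->UploadToVideoFrame(decoded_surface, frame);
}

}  // namespace media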