summaryrefslogtreecommitdiffstats
path: root/media
diff options
context:
space:
mode:
authorimcheng@chromium.org <imcheng@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-08-25 00:18:44 +0000
committerimcheng@chromium.org <imcheng@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-08-25 00:18:44 +0000
commit12b37dc8e0ac8395342754e49f6ac4ab48e08015 (patch)
tree86609d30a2085ff0f707a9fd3e9d2d02fff2fa00 /media
parent94c8bdb5af3b423d72ff3fb6e20bb6592e0362f2 (diff)
downloadchromium_src-12b37dc8e0ac8395342754e49f6ac4ab48e08015.zip
chromium_src-12b37dc8e0ac8395342754e49f6ac4ab48e08015.tar.gz
chromium_src-12b37dc8e0ac8395342754e49f6ac4ab48e08015.tar.bz2
Changed mft_h264_decoder's API to match with video_decode_engine.h. Also changed the example and unittests.
Implementation of mft_h264_decoder.cc is similar to ffmpeg_video_decode_engine.cc. For now, a d3d surface output from the decoder is converted from NV12 to YV12 and stored in regular memory. So rendering with --enable-dxva is a bit slower for now. Once we figure out how to connect with ANGLE, we won't need the conversion / transfer. basic_renderer.* is now replaced with a simpler implementation of EventHandler inside the example code. d3d_util.* is also removed because d3d9 initialization is now moved inside the initialization of the decoder and it is the only place where it is used. BUG=none TEST=included in this patch Review URL: http://codereview.chromium.org/3156046 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@57261 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r--media/filters/video_decode_engine.h3
-rw-r--r--media/media.gyp6
-rw-r--r--media/mf/README.chromium46
-rw-r--r--media/mf/basic_renderer.cc220
-rw-r--r--media/mf/basic_renderer.h60
-rw-r--r--media/mf/d3d_util.cc89
-rw-r--r--media/mf/d3d_util.h34
-rw-r--r--media/mf/file_reader_util.cc1
-rw-r--r--media/mf/mft_h264_decoder.cc886
-rw-r--r--media/mf/mft_h264_decoder.h174
-rw-r--r--media/mf/mft_h264_decoder_example.cc280
-rw-r--r--media/mf/test/mft_h264_decoder_unittest.cc575
-rw-r--r--media/mf/test/run_all_unittests.cc2
13 files changed, 1044 insertions, 1332 deletions
diff --git a/media/filters/video_decode_engine.h b/media/filters/video_decode_engine.h
index 3098fe1..f1faf7b 100644
--- a/media/filters/video_decode_engine.h
+++ b/media/filters/video_decode_engine.h
@@ -72,6 +72,7 @@ class VideoDecodeEngine : public base::RefCountedThreadSafe<VideoDecodeEngine> {
public:
struct EventHandler {
public:
+ virtual ~EventHandler() {}
virtual void OnInitializeComplete(const VideoCodecInfo& info) = 0;
virtual void OnUninitializeComplete() = 0;
virtual void OnFlushComplete() = 0;
@@ -95,7 +96,7 @@ class VideoDecodeEngine : public base::RefCountedThreadSafe<VideoDecodeEngine> {
const VideoCodecConfig& config) = 0;
// Uninitialize the engine. Engine should destroy all resources and call
- // EventHandler::OnInitializeDone().
+ // EventHandler::OnUninitializeComplete().
virtual void Uninitialize() = 0;
// Flush the engine. Engine should return all the buffers to owner ( which
diff --git a/media/media.gyp b/media/media.gyp
index 45815e72..5f3e42f 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -466,10 +466,6 @@
'..',
],
'sources': [
- 'mf/basic_renderer.cc',
- 'mf/basic_renderer.h',
- 'mf/d3d_util.cc',
- 'mf/d3d_util.h',
'mf/file_reader_util.cc',
'mf/file_reader_util.h',
'mf/mft_h264_decoder_example.cc',
@@ -495,8 +491,6 @@
'..',
],
'sources': [
- 'mf/d3d_util.cc',
- 'mf/d3d_util.h',
'mf/file_reader_util.cc',
'mf/file_reader_util.h',
'mf/test/mft_h264_decoder_unittest.cc',
diff --git a/media/mf/README.chromium b/media/mf/README.chromium
index 5ef5f64..73f1deb 100644
--- a/media/mf/README.chromium
+++ b/media/mf/README.chromium
@@ -1,23 +1,23 @@
-This tool demonstrates the use of the Media Foundation H.264 decoder as a
-standalone Media Foundation Transform (MFT). The H.264 decoder takes sample
-objects (IMFSample) containing Annex B streams as input, and outputs decoded
-YV12/NV12 video frames as output, contained in a buffer object (if DXVA is not
-enabled) or a Direct3D surface (if DXVA is enabled.)
-
-This tool uses ffmpeg's parser and bitstream converter to read a file
-containing H.264 video and outputs packets containing Annex B streams which are
-then fed into the H.264 decoder. This tool also demonstrates the use of the
-H.264 decoder using callbacks.
-
-Requirements: Windows 7
-
-Note1: On some video files, there is a mysterious 1-off decoded frame count
-when DXVA is enabled.
-
-Note2: This tool requires the ffmpeg library to have the H.264 codec and Annex
-B bitstream filter. You might need build your own, or grab one from
-http://ffmpeg.arrozcru.org/autobuilds/
-
-Note3: A single H264Mft instance is only for 1 H.264 video stream only.
-Inputting streams consisting of more than 1 video to a single instance
-may result in undefined behavior.
+This tool demonstrates the use of the Media Foundation H.264 decoder as a
+standalone Media Foundation Transform (MFT). The H.264 decoder takes sample
+objects (IMFSample) containing Annex B streams as input, and outputs decoded
+YV12/NV12 video frames as output, contained in a buffer object (if DXVA is not
+enabled) or a Direct3D surface (if DXVA is enabled.)
+
+This tool uses ffmpeg's parser and bitstream converter to read a file
+containing H.264 video and outputs packets containing Annex B streams which are
+then fed into the H.264 decoder. This tool also demonstrates the use of the
+H.264 decoder using callbacks.
+
+Requirements: Windows 7
+
+Note1: On some video files, there is a mysterious 1-off decoded frame count
+when DXVA is enabled.
+
+Note2: This tool requires the ffmpeg library to have the H.264 codec and Annex
+B bitstream filter. You might need build your own, or grab one from
+http://ffmpeg.arrozcru.org/autobuilds/
+
+Note3: A single H264Mft instance is only for 1 H.264 video stream only.
+Inputting streams consisting of more than 1 video to a single instance
+may result in undefined behavior.
diff --git a/media/mf/basic_renderer.cc b/media/mf/basic_renderer.cc
deleted file mode 100644
index d1eddb9..0000000
--- a/media/mf/basic_renderer.cc
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mf/basic_renderer.h"
-
-#include <d3d9.h>
-#include <mfapi.h>
-#include <mfidl.h>
-
-#include "base/message_loop.h"
-#include "base/scoped_comptr_win.h"
-#include "media/base/yuv_convert.h"
-
-// For MFGetService and MF_BUFFER_SERVICE (getting D3D surface from buffer)
-#pragma comment(lib, "mf.lib")
-#pragma comment(lib, "strmiids.lib")
-
-namespace media {
-
-// Converts the given raw data buffer into RGB32 format, and drawing the result
-// into the given window. This is only used when DXVA2 is not enabled.
-// Returns: true on success.
-bool ConvertToRGBAndDrawToWindow(HWND video_window, uint8* data, int width,
- int height, int stride) {
- CHECK(video_window != NULL);
- CHECK(data != NULL);
- CHECK_GT(width, 0);
- CHECK_GT(height, 0);
- CHECK_GE(stride, width);
- height = (height + 15) & ~15;
- bool success = true;
- uint8* y_start = reinterpret_cast<uint8*>(data);
- uint8* u_start = y_start + height * stride * 5 / 4;
- uint8* v_start = y_start + height * stride;
- static uint8* rgb_frame = new uint8[height * stride * 4];
- int y_stride = stride;
- int uv_stride = stride / 2;
- int rgb_stride = stride * 4;
- ConvertYUVToRGB32(y_start, u_start, v_start, rgb_frame,
- width, height, y_stride, uv_stride,
- rgb_stride, YV12);
- PAINTSTRUCT ps;
- InvalidateRect(video_window, NULL, TRUE);
- HDC hdc = BeginPaint(video_window, &ps);
- BITMAPINFOHEADER hdr;
- hdr.biSize = sizeof(BITMAPINFOHEADER);
- hdr.biWidth = width;
- hdr.biHeight = -height; // minus means top-down bitmap
- hdr.biPlanes = 1;
- hdr.biBitCount = 32;
- hdr.biCompression = BI_RGB; // no compression
- hdr.biSizeImage = 0;
- hdr.biXPelsPerMeter = 1;
- hdr.biYPelsPerMeter = 1;
- hdr.biClrUsed = 0;
- hdr.biClrImportant = 0;
- int rv = StretchDIBits(hdc, 0, 0, width, height, 0, 0, width, height,
- rgb_frame, reinterpret_cast<BITMAPINFO*>(&hdr),
- DIB_RGB_COLORS, SRCCOPY);
- if (rv == 0) {
- LOG(ERROR) << "StretchDIBits failed";
- MessageLoopForUI::current()->QuitNow();
- success = false;
- }
- EndPaint(video_window, &ps);
-
- return success;
-}
-
-// Obtains the underlying raw data buffer for the given IMFMediaBuffer, and
-// calls ConvertToRGBAndDrawToWindow() with it.
-// Returns: true on success.
-bool PaintMediaBufferOntoWindow(HWND video_window, IMFMediaBuffer* video_buffer,
- int width, int height, int stride) {
- CHECK(video_buffer != NULL);
- HRESULT hr;
- BYTE* data;
- DWORD buffer_length;
- DWORD data_length;
- hr = video_buffer->Lock(&data, &buffer_length, &data_length);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to lock IMFMediaBuffer";
- return false;
- }
- if (!ConvertToRGBAndDrawToWindow(video_window,
- reinterpret_cast<uint8*>(data),
- width,
- height,
- stride)) {
- LOG(ERROR) << "Failed to convert raw buffer to RGB and draw to window";
- video_buffer->Unlock();
- return false;
- }
- video_buffer->Unlock();
- return true;
-}
-
-// Obtains the D3D9 surface from the given IMFMediaBuffer, then calls methods
-// in the D3D device to draw to the window associated with it.
-// Returns: true on success.
-bool PaintD3D9BufferOntoWindow(IDirect3DDevice9* device,
- IMFMediaBuffer* video_buffer) {
- CHECK(device != NULL);
- ScopedComPtr<IDirect3DSurface9> surface;
- HRESULT hr = MFGetService(video_buffer, MR_BUFFER_SERVICE,
- IID_PPV_ARGS(surface.Receive()));
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get D3D9 surface from buffer";
- return false;
- }
- hr = device->Clear(0, NULL, D3DCLEAR_TARGET, D3DCOLOR_XRGB(0, 0, 0),
- 1.0f, 0);
- if (FAILED(hr)) {
- LOG(ERROR) << "Device->Clear() failed";
- return false;
- }
- ScopedComPtr<IDirect3DSurface9> backbuffer;
- hr = device->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO,
- backbuffer.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Device->GetBackBuffer() failed";
- return false;
- }
- hr = device->StretchRect(surface.get(), NULL, backbuffer.get(), NULL,
- D3DTEXF_NONE);
- if (FAILED(hr)) {
- LOG(ERROR) << "Device->StretchRect() failed";
- return false;
- }
- hr = device->Present(NULL, NULL, NULL, NULL);
- if (FAILED(hr)) {
- if (hr == E_FAIL) {
- LOG(WARNING) << "Present() returned E_FAIL";
- } else {
- static int frames_dropped = 0;
- LOG(ERROR) << "Device->Present() failed "
- << std::hex << std::showbase << hr;
- if (++frames_dropped == 10) {
- LOG(ERROR) << "Dropped too many frames, quitting";
- MessageLoopForUI::current()->QuitNow();
- return false;
- }
- }
- }
- return true;
-}
-
-static void ReleaseOutputBuffer(VideoFrame* frame) {
- if (frame != NULL &&
- frame->type() == VideoFrame::TYPE_MFBUFFER ||
- frame->type() == VideoFrame::TYPE_DIRECT3DSURFACE) {
- static_cast<IMFMediaBuffer*>(frame->private_buffer())->Release();
- }
-}
-
-// NullRenderer
-
-NullRenderer::NullRenderer(MftH264Decoder* decoder) : MftRenderer(decoder) {}
-NullRenderer::~NullRenderer() {}
-
-void NullRenderer::ProcessFrame(scoped_refptr<VideoFrame> frame) {
- ReleaseOutputBuffer(frame);
- MessageLoop::current()->PostTask(
- FROM_HERE, NewRunnableMethod(decoder_.get(),
- &MftH264Decoder::GetOutput));
-}
-
-void NullRenderer::StartPlayback() {
- MessageLoop::current()->PostTask(
- FROM_HERE, NewRunnableMethod(decoder_.get(),
- &MftH264Decoder::GetOutput));
-}
-
-void NullRenderer::OnDecodeError(MftH264Decoder::Error error) {
- MessageLoop::current()->Quit();
-}
-
-// BasicRenderer
-
-BasicRenderer::BasicRenderer(MftH264Decoder* decoder,
- HWND window, IDirect3DDevice9* device)
- : MftRenderer(decoder),
- window_(window),
- device_(device) {
-}
-
-BasicRenderer::~BasicRenderer() {}
-
-void BasicRenderer::ProcessFrame(scoped_refptr<VideoFrame> frame) {
- MessageLoopForUI::current()->PostDelayedTask(
- FROM_HERE, NewRunnableMethod(decoder_.get(),
- &MftH264Decoder::GetOutput),
- frame->GetDuration().InMilliseconds());
- if (device_ != NULL) {
- if (!PaintD3D9BufferOntoWindow(device_,
- static_cast<IMFMediaBuffer*>(frame->private_buffer()))) {
- MessageLoopForUI::current()->QuitNow();
- }
- } else {
- if (!PaintMediaBufferOntoWindow(
- window_, static_cast<IMFMediaBuffer*>(frame->private_buffer()),
- frame->width(), frame->height(), frame->stride(0))) {
- MessageLoopForUI::current()->QuitNow();
- }
- }
- ReleaseOutputBuffer(frame);
-}
-
-void BasicRenderer::StartPlayback() {
- MessageLoopForUI::current()->PostTask(
- FROM_HERE, NewRunnableMethod(decoder_.get(),
- &MftH264Decoder::GetOutput));
-}
-
-void BasicRenderer::OnDecodeError(MftH264Decoder::Error error) {
- MessageLoopForUI::current()->Quit();
-}
-
-} // namespace media
diff --git a/media/mf/basic_renderer.h b/media/mf/basic_renderer.h
deleted file mode 100644
index 367446d..0000000
--- a/media/mf/basic_renderer.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Short / basic implementation to simulate rendering H.264 frames outputs by
-// MF's H.264 decoder to screen.
-
-#ifndef MEDIA_MF_BASIC_RENDERER_H_
-#define MEDIA_MF_BASIC_RENDERER_H_
-
-#include <d3d9.h>
-
-#include "base/scoped_ptr.h"
-#include "base/scoped_comptr_win.h"
-#include "media/base/video_frame.h"
-#include "media/mf/mft_h264_decoder.h"
-
-namespace media {
-
-class MftRenderer : public base::RefCountedThreadSafe<MftRenderer> {
- public:
- explicit MftRenderer(MftH264Decoder* decoder) : decoder_(decoder) {}
- virtual ~MftRenderer() {}
- virtual void ProcessFrame(scoped_refptr<VideoFrame> frame) = 0;
- virtual void StartPlayback() = 0;
- virtual void OnDecodeError(MftH264Decoder::Error error) = 0;
-
- protected:
- scoped_refptr<MftH264Decoder> decoder_;
-};
-
-// This renderer does nothing with the frame except discarding it.
-class NullRenderer : public MftRenderer {
- public:
- explicit NullRenderer(MftH264Decoder* decoder);
- virtual ~NullRenderer();
- virtual void ProcessFrame(scoped_refptr<VideoFrame> frame);
- virtual void StartPlayback();
- virtual void OnDecodeError(MftH264Decoder::Error error);
-};
-
-// This renderer does a basic playback by drawing to |window_|. It tries to
-// respect timing specified in the recevied VideoFrames.
-class BasicRenderer : public MftRenderer {
- public:
- explicit BasicRenderer(MftH264Decoder* decoder,
- HWND window, IDirect3DDevice9* device);
- virtual ~BasicRenderer();
- virtual void ProcessFrame(scoped_refptr<VideoFrame> frame);
- virtual void StartPlayback();
- virtual void OnDecodeError(MftH264Decoder::Error error);
-
- private:
- HWND window_;
- ScopedComPtr<IDirect3DDevice9> device_;
-};
-
-} // namespace media
-
-#endif // MEDIA_MF_BASIC_RENDERER_H_
diff --git a/media/mf/d3d_util.cc b/media/mf/d3d_util.cc
deleted file mode 100644
index a6639e4..0000000
--- a/media/mf/d3d_util.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mf/d3d_util.h"
-
-#include <d3d9.h>
-#include <dxva2api.h>
-
-#include "base/scoped_comptr_win.h"
-
-namespace media {
-
-IDirect3DDeviceManager9* CreateD3DDevManager(HWND video_window,
- IDirect3D9** direct3d,
- IDirect3DDevice9** device) {
- ScopedComPtr<IDirect3DDeviceManager9> dev_manager;
- ScopedComPtr<IDirect3D9> d3d;
- d3d.Attach(Direct3DCreate9(D3D_SDK_VERSION));
- if (d3d == NULL) {
- LOG(ERROR) << "Failed to create D3D9";
- return NULL;
- }
- D3DPRESENT_PARAMETERS present_params = {0};
-
- // Once we know the dimensions, we need to reset using
- // AdjustD3DDeviceBackBufferDimensions().
- present_params.BackBufferWidth = 0;
- present_params.BackBufferHeight = 0;
- present_params.BackBufferFormat = D3DFMT_UNKNOWN;
- present_params.BackBufferCount = 1;
- present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
- present_params.hDeviceWindow = video_window;
- present_params.Windowed = TRUE;
- present_params.Flags = D3DPRESENTFLAG_VIDEO;
- present_params.FullScreen_RefreshRateInHz = 0;
- present_params.PresentationInterval = 0;
-
- ScopedComPtr<IDirect3DDevice9> temp_device;
-
- // D3DCREATE_HARDWARE_VERTEXPROCESSING specifies hardware vertex processing.
- // (Is it even needed for just video decoding?)
- HRESULT hr = d3d->CreateDevice(D3DADAPTER_DEFAULT,
- D3DDEVTYPE_HAL,
- NULL,
- D3DCREATE_HARDWARE_VERTEXPROCESSING,
- &present_params,
- temp_device.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to create D3D Device";
- return NULL;
- }
- UINT dev_manager_reset_token = 0;
- hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
- dev_manager.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Couldn't create D3D Device manager";
- return NULL;
- }
- hr = dev_manager->ResetDevice(temp_device.get(), dev_manager_reset_token);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set device to device manager";
- return NULL;
- }
- *direct3d = d3d.Detach();
- *device = temp_device.Detach();
- return dev_manager.Detach();
-}
-
-bool AdjustD3DDeviceBackBufferDimensions(IDirect3DDevice9* device,
- HWND video_window,
- int width,
- int height) {
- D3DPRESENT_PARAMETERS present_params = {0};
- present_params.BackBufferWidth = width;
- present_params.BackBufferHeight = height;
- present_params.BackBufferFormat = D3DFMT_UNKNOWN;
- present_params.BackBufferCount = 1;
- present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
- present_params.hDeviceWindow = video_window;
- present_params.Windowed = TRUE;
- present_params.Flags = D3DPRESENTFLAG_VIDEO;
- present_params.FullScreen_RefreshRateInHz = 0;
- present_params.PresentationInterval = 0;
-
- return SUCCEEDED(device->Reset(&present_params)) ? true : false;
-}
-
-} // namespace media
diff --git a/media/mf/d3d_util.h b/media/mf/d3d_util.h
deleted file mode 100644
index 307eec57..0000000
--- a/media/mf/d3d_util.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Utility functions for Direct3D Devices.
-
-#ifndef MEDIA_MF_D3D_UTIL_H_
-#define MEDIA_MF_D3D_UTIL_H_
-
-#include <windows.h>
-
-struct IDirect3D9;
-struct IDirect3DDevice9;
-struct IDirect3DDeviceManager9;
-
-namespace media {
-
-// Creates a Direct3D device manager for the given window.
-IDirect3DDeviceManager9* CreateD3DDevManager(HWND video_window,
- IDirect3D9** direct3d,
- IDirect3DDevice9** device);
-
-// Resets the D3D device to prevent scaling from happening because it was
-// created with window before resizing occurred. We need to change the back
-// buffer dimensions to the actual video frame dimensions.
-// Both the decoder and device should be initialized before calling this method.
-// Returns: true if successful.
-bool AdjustD3DDeviceBackBufferDimensions(IDirect3DDevice9* device,
- HWND video_window,
- int width,
- int height);
-
-} // namespace media
-#endif // MEDIA_MF_D3D_UTIL_H_
diff --git a/media/mf/file_reader_util.cc b/media/mf/file_reader_util.cc
index dcfc114..d18afe6 100644
--- a/media/mf/file_reader_util.cc
+++ b/media/mf/file_reader_util.cc
@@ -178,7 +178,6 @@ int64 FFmpegFileReader::TimeBaseToMicroseconds(
int64 FFmpegFileReader::MicrosecondsToTimeBase(
int64 time_base_unit) const {
- // ffmpeg.
CHECK(codec_context_) << "Codec context needs to be initialized";
return time_base_unit * codec_context_->time_base.den / 2000 /
codec_context_->time_base.num;
diff --git a/media/mf/mft_h264_decoder.cc b/media/mf/mft_h264_decoder.cc
index 3762c27..22a42f4 100644
--- a/media/mf/mft_h264_decoder.cc
+++ b/media/mf/mft_h264_decoder.cc
@@ -2,48 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/mf/mft_h264_decoder.h"
+#include "build/build_config.h" // For OS_WIN.
+
+#if defined(OS_WIN)
-#include <algorithm>
-#include <string>
+#include "media/mf/mft_h264_decoder.h"
#include <d3d9.h>
+#include <dxva2api.h>
#include <evr.h>
#include <initguid.h>
#include <mfapi.h>
#include <mferror.h>
-#include <mfidl.h>
-#include <shlwapi.h>
#include <wmcodecdsp.h>
-#include "base/callback.h"
-#include "base/logging.h"
+#include "base/time.h"
#include "base/message_loop.h"
-#include "base/scoped_comptr_win.h"
-#include "media/base/data_buffer.h"
-#include "media/base/video_frame.h"
-#pragma comment(lib, "d3d9.lib")
#pragma comment(lib, "dxva2.lib")
-#pragma comment(lib, "evr.lib")
-#pragma comment(lib, "mfuuid.lib")
+#pragma comment(lib, "d3d9.lib")
+#pragma comment(lib, "mf.lib")
#pragma comment(lib, "mfplat.lib")
+#pragma comment(lib, "strmiids.lib")
-namespace media {
-
-// Returns Media Foundation's H.264 decoder as an MFT, or NULL if not found
-// (e.g. Not using Windows 7)
-static IMFTransform* GetH264Decoder() {
- // Use __uuidof() to avoid linking to a library just for the CLSID.
- IMFTransform* dec;
- HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT), NULL,
- CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&dec));
- if (FAILED(hr)) {
- LOG(ERROR) << "CoCreateInstance failed " << std::hex << std::showbase << hr;
- return NULL;
- }
- return dec;
-}
+namespace {
// Creates an empty Media Foundation sample with no buffers.
static IMFSample* CreateEmptySample() {
@@ -73,7 +55,9 @@ static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
// with the align argument being 0.
hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
} else {
- hr = MFCreateAlignedMemoryBuffer(buffer_length, align-1, buffer.Receive());
+ hr = MFCreateAlignedMemoryBuffer(buffer_length,
+ align - 1,
+ buffer.Receive());
}
if (FAILED(hr)) {
LOG(ERROR) << "Unable to create an empty buffer";
@@ -147,244 +131,174 @@ static IMFSample* CreateInputSample(const uint8* stream, int size,
return sample.Detach();
}
-// Public methods
+} // namespace
+
+namespace media {
+
+// public methods
MftH264Decoder::MftH264Decoder(bool use_dxva)
- : read_input_callback_(NULL),
- output_avail_callback_(NULL),
- output_error_callback_(NULL),
+ : use_dxva_(use_dxva),
+ d3d9_(NULL),
+ device_(NULL),
+ device_manager_(NULL),
+ device_window_(NULL),
decoder_(NULL),
- initialized_(false),
- use_dxva_(use_dxva),
- drain_message_sent_(false),
- next_frame_discontinuous_(false),
- in_buffer_size_(0),
- in_buffer_alignment_(0),
- out_buffer_size_(0),
- out_buffer_alignment_(0),
- frames_read_(0),
- frames_decoded_(0),
- width_(0),
- height_(0),
- stride_(0),
- output_format_(use_dxva ? MFVideoFormat_NV12 : MFVideoFormat_YV12) {
+ input_stream_info_(),
+ output_stream_info_(),
+ state_(kUninitialized),
+ event_handler_(NULL) {
+ memset(&config_, 0, sizeof(config_));
+ memset(&info_, 0, sizeof(info_));
}
MftH264Decoder::~MftH264Decoder() {
- // |decoder_| has to be destroyed before the library uninitialization.
- if (decoder_)
- decoder_->Release();
- if (FAILED(MFShutdown())) {
- LOG(WARNING) << "Warning: MF failed to shutdown";
- }
- CoUninitialize();
}
-bool MftH264Decoder::Init(IDirect3DDeviceManager9* dev_manager,
- int frame_rate_num, int frame_rate_denom,
- int width, int height,
- int aspect_num, int aspect_denom,
- ReadInputCallback* read_input_cb,
- OutputReadyCallback* output_avail_cb,
- OutputErrorCallback* output_error_cb) {
- if (initialized_)
- return true;
- if (!read_input_cb || !output_avail_cb || !output_error_cb) {
- LOG(ERROR) << "Callbacks missing in Init";
- return false;
+void MftH264Decoder::Initialize(
+ MessageLoop* message_loop,
+ VideoDecodeEngine::EventHandler* event_handler,
+ const VideoCodecConfig& config) {
+ LOG(INFO) << "MftH264Decoder::Initialize";
+ if (state_ != kUninitialized) {
+ LOG(ERROR) << "Initialize: invalid state";
+ return;
+ }
+ if (!message_loop || !event_handler) {
+ LOG(ERROR) << "MftH264Decoder::Initialize: parameters cannot be NULL";
+ return;
+ }
+
+ config_ = config;
+ event_handler_ = event_handler;
+
+ info_.provides_buffers_ = true;
+
+ // TODO(jiesun): Actually it is more likely an NV12 D3DSuface9.
+ // Until we had hardware composition working.
+ if (use_dxva_) {
+ info_.stream_info_.surface_format_ = VideoFrame::YV12;
+ info_.stream_info_.surface_type_ = VideoFrame::TYPE_SYSTEM_MEMORY;
+ } else {
+ info_.stream_info_.surface_format_ = VideoFrame::YV12;
+ info_.stream_info_.surface_type_ = VideoFrame::TYPE_SYSTEM_MEMORY;
}
- read_input_callback_.reset(read_input_cb);
- output_avail_callback_.reset(output_avail_cb);
- output_error_callback_.reset(output_error_cb);
- if (!InitComMfLibraries())
- return false;
- if (!InitDecoder(dev_manager, frame_rate_num, frame_rate_denom,
- width, height, aspect_num, aspect_denom))
- return false;
- if (!GetStreamsInfoAndBufferReqs())
- return false;
- if (!SendStartMessage())
- return false;
- initialized_ = true;
- return true;
-}
-static const char* const ProcessOutputStatusToCString(HRESULT hr) {
- if (hr == MF_E_TRANSFORM_STREAM_CHANGE)
- return "media stream change occurred, need to set output type";
- if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT)
- return "decoder needs more samples";
- else
- return "unhandled error from ProcessOutput";
+ // codec_info.stream_info_.surface_width_/height_ are initialized
+ // in InitInternal().
+ info_.success_ = InitInternal();
+ if (info_.success_) {
+ state_ = kNormal;
+ event_handler_->OnInitializeComplete(info_);
+ } else {
+ LOG(ERROR) << "MftH264Decoder::Initialize failed";
+ }
}
-void MftH264Decoder::GetOutput() {
- CHECK(initialized_);
+void MftH264Decoder::Uninitialize() {
+ LOG(INFO) << "MftH264Decoder::Uninitialize";
+ if (state_ == kUninitialized) {
+ LOG(ERROR) << "Uninitialize: invalid state";
+ return;
+ }
+
+ // TODO(imcheng):
+ // Cannot shutdown COM libraries here because the COM objects still needs
+ // to be Release()'ed. We can explicitly release them here, or move the
+ // uninitialize to GpuVideoService...
+ if (device_window_)
+ DestroyWindow(device_window_);
+ decoder_.Release();
+ device_manager_.Release();
+ device_.Release();
+ d3d9_.Release();
+ ShutdownComLibraries();
+ state_ = kUninitialized;
+ event_handler_->OnUninitializeComplete();
+}
- ScopedComPtr<IMFSample> output_sample;
- if (!use_dxva_) {
- // If DXVA is enabled, the decoder will allocate the sample for us.
- output_sample.Attach(CreateEmptySampleWithBuffer(out_buffer_size_,
- out_buffer_alignment_));
- if (!output_sample.get()) {
- LOG(ERROR) << "GetSample: failed to create empty output sample";
- output_error_callback_->Run(kNoMemory);
- return;
- }
+void MftH264Decoder::Flush() {
+ LOG(INFO) << "MftH264Decoder::Flush";
+ if (state_ != kNormal) {
+ LOG(ERROR) << "Flush: invalid state";
+ return;
}
- MFT_OUTPUT_DATA_BUFFER output_data_buffer;
- HRESULT hr;
- DWORD status;
- for (;;) {
- output_data_buffer.dwStreamID = 0;
- output_data_buffer.pSample = output_sample.get();
- output_data_buffer.dwStatus = 0;
- output_data_buffer.pEvents = NULL;
- hr = decoder_->ProcessOutput(0, // No flags
- 1, // # of out streams to pull from
- &output_data_buffer,
- &status);
- IMFCollection* events = output_data_buffer.pEvents;
- if (events) {
- LOG(INFO) << "Got events from ProcessOuput, but discarding";
- events->Release();
- }
- if (FAILED(hr)) {
- LOG(INFO) << "ProcessOutput failed with status " << std::hex << hr
- << ", meaning..." << ProcessOutputStatusToCString(hr);
- if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
- if (!SetDecoderOutputMediaType(output_format_)) {
- LOG(ERROR) << "Failed to reset output type";
- output_error_callback_->Run(kResetOutputStreamFailed);
- return;
- } else {
- LOG(INFO) << "Reset output type done";
- continue;
- }
- } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
- // If we have read everything then we should've sent a drain message
- // to the MFT. If the drain message is sent but it doesn't give out
- // anymore output then we know the decoder has processed everything.
- if (drain_message_sent_) {
- LOG(INFO) << "Drain message was already sent + no output => done";
- output_error_callback_->Run(kNoMoreOutput);
- return;
- } else {
- if (!ReadInput()) {
- LOG(INFO) << "Failed to read/process input. Sending drain message";
- if (!SendEndOfStreamMessage() || !SendDrainMessage()) {
- LOG(ERROR) << "Failed to send drain message";
- output_error_callback_->Run(kNoMoreOutput);
- return;
- }
- }
- continue;
- }
- } else {
- output_error_callback_->Run(kUnspecifiedError);
- return;
- }
- } else {
- // A decoded sample was successfully obtained.
- LOG(INFO) << "Got a decoded sample from decoder";
- if (use_dxva_) {
- // If dxva is enabled, we did not provide a sample to ProcessOutput,
- // i.e. output_sample is NULL.
- output_sample.Attach(output_data_buffer.pSample);
- if (!output_sample.get()) {
- LOG(ERROR) << "Output sample using DXVA is NULL - ProcessOutput did "
- << "not provide it!";
- output_error_callback_->Run(kOutputSampleError);
- return;
- }
- }
- int64 timestamp, duration;
- hr = output_sample->GetSampleTime(&timestamp);
- hr = output_sample->GetSampleDuration(&duration);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get sample duration or timestamp "
- << std::hex << hr;
- output_error_callback_->Run(kOutputSampleError);
- return;
- }
+ state_ = kFlushing;
+ if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH)) {
+ LOG(WARNING) << "MftH264Decoder::Flush failed to send message";
+ }
+ state_ = kNormal;
+ event_handler_->OnFlushComplete();
+}
- // The duration and timestamps are in 100-ns units, so divide by 10
- // to convert to microseconds.
- timestamp /= 10;
- duration /= 10;
+void MftH264Decoder::Seek() {
+ if (state_ != kNormal) {
+ LOG(ERROR) << "Seek: invalid state";
+ return;
+ }
+ LOG(INFO) << "MftH264Decoder::Seek";
+ // Seek not implemented.
+ event_handler_->OnSeekComplete();
+}
- // Sanity checks for checking if there is really something in the sample.
- DWORD buf_count;
- hr = output_sample->GetBufferCount(&buf_count);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get buff count, hr = " << std::hex << hr;
- output_error_callback_->Run(kOutputSampleError);
- return;
- }
- if (buf_count == 0) {
- LOG(ERROR) << "buf_count is 0, dropping sample";
- output_error_callback_->Run(kOutputSampleError);
- return;
+void MftH264Decoder::EmptyThisBuffer(scoped_refptr<Buffer> buffer) {
+ LOG(INFO) << "MftH264Decoder::EmptyThisBuffer";
+ if (state_ == kUninitialized) {
+ LOG(ERROR) << "EmptyThisBuffer: invalid state";
+ }
+ ScopedComPtr<IMFSample> sample;
+ if (!buffer->IsEndOfStream()) {
+ sample.Attach(
+ CreateInputSample(buffer->GetData(),
+ buffer->GetDataSize(),
+ buffer->GetTimestamp().InMicroseconds() * 10,
+ buffer->GetDuration().InMicroseconds() * 10,
+ input_stream_info_.cbSize,
+ input_stream_info_.cbAlignment));
+ if (!sample.get()) {
+ LOG(ERROR) << "Failed to create an input sample";
+ } else {
+ if (FAILED(decoder_->ProcessInput(0, sample.get(), 0))) {
+ event_handler_->OnError();
}
- ScopedComPtr<IMFMediaBuffer> out_buffer;
- hr = output_sample->GetBufferByIndex(0, out_buffer.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get decoded output buffer";
- output_error_callback_->Run(kOutputSampleError);
- return;
+ }
+ } else {
+ if (state_ != MftH264Decoder::kEosDrain) {
+ // End of stream, send drain messages.
+ if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM) ||
+ !SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN)) {
+ LOG(ERROR) << "Failed to send EOS / drain messages to MFT";
+ event_handler_->OnError();
+ } else {
+ state_ = MftH264Decoder::kEosDrain;
}
-
- // To obtain the data, the caller should call the Lock() method instead
- // of using the data field.
- // In NV12, there are only 2 planes - the Y plane, and the interleaved UV
- // plane. Both have the same strides.
- uint8* null_data[3] = { NULL, NULL, NULL };
- int32 uv_stride = output_format_ == MFVideoFormat_NV12 ? stride_
- : stride_ / 2;
- int32 strides[3] = { stride_, uv_stride, uv_stride };
- scoped_refptr<VideoFrame> decoded_frame;
- VideoFrame::CreateFrameExternal(
- use_dxva_ ? VideoFrame::TYPE_DIRECT3DSURFACE :
- VideoFrame::TYPE_MFBUFFER,
- output_format_ == MFVideoFormat_NV12 ? VideoFrame::NV12
- : VideoFrame::YV12,
- width_,
- height_,
- 2,
- null_data,
- strides,
- base::TimeDelta::FromMicroseconds(timestamp),
- base::TimeDelta::FromMicroseconds(duration),
- out_buffer.Detach(),
- &decoded_frame);
- CHECK(decoded_frame.get());
- frames_decoded_++;
- output_avail_callback_->Run(decoded_frame);
- return;
}
}
+ DoDecode();
}
-bool MftH264Decoder::Flush() {
- CHECK(initialized_);
- HRESULT hr = decoder_->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, NULL);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to send the flush message to decoder";
- return false;
+void MftH264Decoder::FillThisBuffer(scoped_refptr<VideoFrame> frame) {
+ LOG(INFO) << "MftH264Decoder::FillThisBuffer";
+ if (state_ == kUninitialized) {
+ LOG(ERROR) << "FillThisBuffer: invalid state";
+ return;
}
- next_frame_discontinuous_ = true;
- return true;
+ scoped_refptr<Buffer> buffer;
+ event_handler_->OnEmptyBufferCallback(buffer);
}
-// Private methods
+// private methods
-bool MftH264Decoder::InitComMfLibraries() {
+// static
+bool MftH264Decoder::StartupComLibraries() {
HRESULT hr;
- hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
+ hr = CoInitializeEx(NULL,
+ COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
if (FAILED(hr)) {
LOG(ERROR) << "CoInit fail";
return false;
}
+
hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
if (FAILED(hr)) {
LOG(ERROR) << "MFStartup fail";
@@ -394,121 +308,170 @@ bool MftH264Decoder::InitComMfLibraries() {
return true;
}
-bool MftH264Decoder::InitDecoder(IDirect3DDeviceManager9* dev_manager,
- int frame_rate_num, int frame_rate_denom,
- int width, int height,
- int aspect_num, int aspect_denom) {
- decoder_ = GetH264Decoder();
- if (!decoder_)
+// static
+void MftH264Decoder::ShutdownComLibraries() {
+ HRESULT hr;
+ hr = MFShutdown();
+ if (FAILED(hr)) {
+ LOG(WARNING) << "Warning: MF failed to shutdown";
+ }
+ CoUninitialize();
+}
+
+bool MftH264Decoder::CreateD3DDevManager() {
+ d3d9_.Attach(Direct3DCreate9(D3D_SDK_VERSION));
+ if (d3d9_.get() == NULL) {
+ LOG(ERROR) << "Failed to create D3D9";
+ return false;
+ }
+ static const TCHAR windowName[] = TEXT("MFT Decoder Hidden Window");
+ static const TCHAR className[] = TEXT("STATIC");
+ device_window_ = CreateWindowEx(WS_EX_NOACTIVATE,
+ className,
+ windowName,
+ WS_DISABLED | WS_POPUP,
+ 0, 0, 1, 1,
+ HWND_MESSAGE,
+ NULL,
+ GetModuleHandle(NULL),
+ NULL);
+ CHECK(device_window_);
+
+ D3DPRESENT_PARAMETERS present_params = {0};
+ present_params.BackBufferWidth = 1;
+ present_params.BackBufferHeight = 1;
+ present_params.BackBufferFormat = D3DFMT_UNKNOWN;
+ present_params.BackBufferCount = 1;
+ present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
+ present_params.hDeviceWindow = device_window_;
+ present_params.Windowed = TRUE;
+ present_params.Flags = D3DPRESENTFLAG_VIDEO;
+ present_params.FullScreen_RefreshRateInHz = 0;
+ present_params.PresentationInterval = 0;
+
+ // D3DCREATE_HARDWARE_VERTEXPROCESSING specifies hardware vertex processing.
+ // (Is it even needed for just video decoding?)
+ HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,
+ D3DDEVTYPE_HAL,
+ device_window_,
+ D3DCREATE_HARDWARE_VERTEXPROCESSING,
+ &present_params,
+ device_.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to create D3D Device";
return false;
- if (use_dxva_ && !SetDecoderD3d9Manager(dev_manager))
+ }
+
+ UINT dev_manager_reset_token = 0;
+ hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
+ device_manager_.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Couldn't create D3D Device manager";
return false;
- if (!SetDecoderMediaTypes(frame_rate_num, frame_rate_denom,
- width, height,
- aspect_num, aspect_denom)) {
+ }
+
+ hr = device_manager_->ResetDevice(device_.get(),
+ dev_manager_reset_token);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set device to device manager";
return false;
}
return true;
}
-bool MftH264Decoder::SetDecoderD3d9Manager(
- IDirect3DDeviceManager9* dev_manager) {
- if (!use_dxva_) {
- LOG(ERROR) << "SetDecoderD3d9Manager should only be called if DXVA is "
- << "enabled";
+bool MftH264Decoder::InitInternal() {
+ if (!StartupComLibraries())
return false;
- }
- if (!dev_manager) {
- LOG(ERROR) << "dev_manager cannot be NULL";
+ if (use_dxva_ && !CreateD3DDevManager())
+ return false;
+ if (!InitDecoder())
+ return false;
+ if (!GetStreamsInfoAndBufferReqs())
+ return false;
+ return SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING);
+}
+
+bool MftH264Decoder::InitDecoder() {
+  // TODO(jiesun): use MFTEnum to get decoder CLSID.
+ HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
+ NULL,
+ CLSCTX_INPROC_SERVER,
+ __uuidof(IMFTransform),
+ reinterpret_cast<void**>(decoder_.Receive()));
+ if (FAILED(hr) || !decoder_.get()) {
+ LOG(ERROR) << "CoCreateInstance failed " << std::hex << std::showbase << hr;
return false;
}
- HRESULT hr;
- hr = decoder_->ProcessMessage(MFT_MESSAGE_SET_D3D_MANAGER,
- reinterpret_cast<ULONG_PTR>(dev_manager));
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set D3D9 device to decoder";
+
+ if (!CheckDecoderDxvaSupport())
return false;
+
+ if (use_dxva_) {
+ hr = decoder_->ProcessMessage(
+ MFT_MESSAGE_SET_D3D_MANAGER,
+ reinterpret_cast<ULONG_PTR>(device_manager_.get()));
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set D3D9 device to decoder";
+ return false;
+ }
}
- return true;
+
+ return SetDecoderMediaTypes();
}
-bool MftH264Decoder::SetDecoderMediaTypes(int frame_rate_num,
- int frame_rate_denom,
- int width, int height,
- int aspect_num, int aspect_denom) {
- DCHECK(decoder_);
- if (!SetDecoderInputMediaType(frame_rate_num, frame_rate_denom,
- width, height,
- aspect_num, aspect_denom))
+bool MftH264Decoder::CheckDecoderDxvaSupport() {
+ ScopedComPtr<IMFAttributes> attributes;
+ HRESULT hr = decoder_->GetAttributes(attributes.Receive());
+ if (FAILED(hr)) {
+    LOG(ERROR) << "Failed to get decoder attributes, hr = "
+ << std::hex << std::showbase << hr;
return false;
- if (!SetDecoderOutputMediaType(output_format_)) {
+ }
+
+ UINT32 dxva;
+ hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
+ if (FAILED(hr) || !dxva) {
+ LOG(ERROR) << "Failed to get DXVA attr, hr = "
+ << std::hex << std::showbase << hr
+               << "; this might not be the right decoder.";
return false;
}
return true;
}
-bool MftH264Decoder::SetDecoderInputMediaType(int frame_rate_num,
- int frame_rate_denom,
- int width, int height,
- int aspect_num,
- int aspect_denom) {
+bool MftH264Decoder::SetDecoderMediaTypes() {
+ if (!SetDecoderInputMediaType())
+ return false;
+ return SetDecoderOutputMediaType(use_dxva_ ? MFVideoFormat_NV12
+ : MFVideoFormat_YV12);
+}
+
+bool MftH264Decoder::SetDecoderInputMediaType() {
ScopedComPtr<IMFMediaType> media_type;
- HRESULT hr;
- hr = MFCreateMediaType(media_type.Receive());
+ HRESULT hr = MFCreateMediaType(media_type.Receive());
if (FAILED(hr)) {
LOG(ERROR) << "Failed to create empty media type object";
return false;
}
+
hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
if (FAILED(hr)) {
LOG(ERROR) << "SetGUID for major type failed";
return false;
}
+
hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
if (FAILED(hr)) {
LOG(ERROR) << "SetGUID for subtype failed";
return false;
}
- // Provide additional info to the decoder to avoid a format change during
- // streaming.
- if (frame_rate_num > 0 && frame_rate_denom > 0) {
- hr = MFSetAttributeRatio(media_type.get(), MF_MT_FRAME_RATE,
- frame_rate_num, frame_rate_denom);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set frame rate";
- return false;
- }
- }
- if (width > 0 && height > 0) {
- hr = MFSetAttributeSize(media_type.get(), MF_MT_FRAME_SIZE, width, height);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set frame size";
- return false;
- }
- }
-
- // TODO(imcheng): Not sure about this, but this is the recommended value by
- // MSDN.
- hr = media_type->SetUINT32(MF_MT_INTERLACE_MODE,
- MFVideoInterlace_MixedInterlaceOrProgressive);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set interlace mode";
- return false;
- }
- if (aspect_num > 0 && aspect_denom > 0) {
- hr = MFSetAttributeRatio(media_type.get(), MF_MT_PIXEL_ASPECT_RATIO,
- aspect_num, aspect_denom);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get aspect ratio";
- return false;
- }
- }
hr = decoder_->SetInputType(0, media_type.get(), 0); // No flags
if (FAILED(hr)) {
LOG(ERROR) << "Failed to set decoder's input type";
return false;
}
+
return true;
}
@@ -518,53 +481,36 @@ bool MftH264Decoder::SetDecoderOutputMediaType(const GUID subtype) {
bool found = false;
while (SUCCEEDED(decoder_->GetOutputAvailableType(0, i, &out_media_type))) {
GUID out_subtype;
- HRESULT hr;
- hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
+ HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
if (FAILED(hr)) {
LOG(ERROR) << "Failed to GetGUID() on GetOutputAvailableType() " << i;
out_media_type->Release();
continue;
}
if (out_subtype == subtype) {
- LOG(INFO) << "|subtype| is at index "
- << i << " in GetOutputAvailableType()";
hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags
hr = MFGetAttributeSize(out_media_type, MF_MT_FRAME_SIZE,
- reinterpret_cast<UINT32*>(&width_),
- reinterpret_cast<UINT32*>(&height_));
- hr = MFGetStrideForBitmapInfoHeader(output_format_.Data1,
- width_,
- reinterpret_cast<LONG*>(&stride_));
+ reinterpret_cast<UINT32*>(&info_.stream_info_.surface_width_),
+ reinterpret_cast<UINT32*>(&info_.stream_info_.surface_height_));
+ config_.width_ = info_.stream_info_.surface_width_;
+ config_.height_ = info_.stream_info_.surface_height_;
if (FAILED(hr)) {
LOG(ERROR) << "Failed to SetOutputType to |subtype| or obtain "
- << "width/height/stride " << std::hex << hr;
+ << "width/height " << std::hex << hr;
} else {
- found = true;
out_media_type->Release();
- break;
+ return true;
}
}
i++;
out_media_type->Release();
}
- if (!found) {
- LOG(ERROR) << "|subtype| was not found in GetOutputAvailableType()";
- return false;
- }
- return true;
+ return false;
}
-bool MftH264Decoder::SendStartMessage() {
- HRESULT hr;
- hr = decoder_->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL);
- if (FAILED(hr)) {
- LOG(ERROR) << "Process start message failed, hr = "
- << std::hex << std::showbase << hr;
- return false;
- } else {
- LOG(INFO) << "Sent a message to decoder to indicate start of stream";
- return true;
- }
+bool MftH264Decoder::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
+ HRESULT hr = decoder_->ProcessMessage(msg, NULL);
+ return SUCCEEDED(hr);
}
// Prints out info about the input/output streams, gets the minimum buffer sizes
@@ -573,129 +519,225 @@ bool MftH264Decoder::SendStartMessage() {
// to do it ourselves and make sure they're the correct size.
// Exception is when dxva is enabled, the decoder will allocate output.
bool MftH264Decoder::GetStreamsInfoAndBufferReqs() {
- DCHECK(decoder_);
- HRESULT hr;
- MFT_INPUT_STREAM_INFO input_stream_info;
- hr = decoder_->GetInputStreamInfo(0, &input_stream_info);
+ HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
if (FAILED(hr)) {
LOG(ERROR) << "Failed to get input stream info";
return false;
}
LOG(INFO) << "Input stream info: ";
- LOG(INFO) << "Max latency: " << input_stream_info.hnsMaxLatency;
+ LOG(INFO) << "Max latency: " << input_stream_info_.hnsMaxLatency;
// There should be three flags, one for requiring a whole frame be in a
// single sample, one for requiring there be one buffer only in a single
// sample, and one that specifies a fixed sample size. (as in cbSize)
LOG(INFO) << "Flags: "
- << std::hex << std::showbase << input_stream_info.dwFlags;
- CHECK_EQ(input_stream_info.dwFlags, 0x7u);
- LOG(INFO) << "Min buffer size: " << input_stream_info.cbSize;
- LOG(INFO) << "Max lookahead: " << input_stream_info.cbMaxLookahead;
- LOG(INFO) << "Alignment: " << input_stream_info.cbAlignment;
- in_buffer_alignment_ = input_stream_info.cbAlignment;
- in_buffer_size_ = input_stream_info.cbSize;
-
- MFT_OUTPUT_STREAM_INFO output_stream_info;
- hr = decoder_->GetOutputStreamInfo(0, &output_stream_info);
+ << std::hex << std::showbase << input_stream_info_.dwFlags;
+ CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
+ LOG(INFO) << "Min buffer size: " << input_stream_info_.cbSize;
+ LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
+ LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment;
+
+ hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
if (FAILED(hr)) {
LOG(ERROR) << "Failed to get output stream info";
return false;
}
LOG(INFO) << "Output stream info: ";
-
// The flags here should be the same and mean the same thing, except when
// DXVA is enabled, there is an extra 0x100 flag meaning decoder will
// allocate its own sample.
LOG(INFO) << "Flags: "
- << std::hex << std::showbase << output_stream_info.dwFlags;
- CHECK_EQ(output_stream_info.dwFlags, use_dxva_ ? 0x107u : 0x7u);
- LOG(INFO) << "Min buffer size: " << output_stream_info.cbSize;
- LOG(INFO) << "Alignment: " << output_stream_info.cbAlignment;
- out_buffer_alignment_ = output_stream_info.cbAlignment;
- out_buffer_size_ = output_stream_info.cbSize;
+ << std::hex << std::showbase << output_stream_info_.dwFlags;
+ CHECK_EQ(output_stream_info_.dwFlags, use_dxva_ ? 0x107u : 0x7u);
+ LOG(INFO) << "Min buffer size: " << output_stream_info_.cbSize;
+ LOG(INFO) << "Alignment: " << output_stream_info_.cbAlignment;
return true;
}
-bool MftH264Decoder::ReadInput() {
- scoped_refptr<DataBuffer> input;
- read_input_callback_->Run(&input);
- if (!input.get() || input->IsEndOfStream()) {
- LOG(INFO) << "No more input";
+bool MftH264Decoder::DoDecode() {
+ if (state_ != kNormal && state_ != kEosDrain) {
+ LOG(ERROR) << "DoDecode: not in normal or drain state";
return false;
- } else {
- // We read an input stream, we can feed it into the decoder.
- return SendInput(input->GetData(), input->GetDataSize(),
- input->GetTimestamp().InMicroseconds() * 10,
- input->GetDuration().InMicroseconds() * 10);
}
-}
-
-bool MftH264Decoder::SendInput(const uint8* data, int size, int64 timestamp,
- int64 duration) {
- CHECK(initialized_);
- CHECK(data);
- CHECK_GT(size, 0);
+ scoped_refptr<VideoFrame> frame;
+ ScopedComPtr<IMFSample> output_sample;
+ if (!use_dxva_) {
+ output_sample.Attach(
+ CreateEmptySampleWithBuffer(output_stream_info_.cbSize,
+ output_stream_info_.cbAlignment));
+ if (!output_sample.get()) {
+ LOG(ERROR) << "GetSample: failed to create empty output sample";
+ event_handler_->OnError();
+ return false;
+ }
+ }
+ MFT_OUTPUT_DATA_BUFFER output_data_buffer;
+ memset(&output_data_buffer, 0, sizeof(output_data_buffer));
+ output_data_buffer.dwStreamID = 0;
+ output_data_buffer.pSample = output_sample;
- bool current_frame_discontinuous = next_frame_discontinuous_;
- next_frame_discontinuous_ = true;
+ DWORD status;
+ HRESULT hr = decoder_->ProcessOutput(0, // No flags
+ 1, // # of out streams to pull from
+ &output_data_buffer,
+ &status);
- if (drain_message_sent_) {
- LOG(ERROR) << "Drain message was already sent, but trying to send more "
- << "input to decoder";
- return false;
+ IMFCollection* events = output_data_buffer.pEvents;
+ if (events != NULL) {
+    LOG(INFO) << "Got events from ProcessOutput, but discarding";
+ events->Release();
}
- ScopedComPtr<IMFSample> sample;
- sample.Attach(CreateInputSample(data, size, timestamp, duration,
- in_buffer_size_, in_buffer_alignment_));
- if (!sample.get()) {
- LOG(ERROR) << "Failed to convert input stream to sample";
- return false;
- }
- HRESULT hr;
- if (current_frame_discontinuous) {
- hr = sample->SetUINT32(MFSampleExtension_Discontinuity, TRUE);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to set sample discontinuity " << std::hex << hr;
+
+ if (FAILED(hr)) {
+ if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+ hr = SetDecoderOutputMediaType(use_dxva_ ? MFVideoFormat_NV12
+ : MFVideoFormat_YV12);
+ if (SUCCEEDED(hr)) {
+ event_handler_->OnFormatChange(info_.stream_info_);
+ return true;
+ } else {
+ event_handler_->OnError();
+ return false;
+ }
+ } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+ if (state_ == kEosDrain) {
+ // No more output from the decoder. Notify EOS and stop playback.
+ scoped_refptr<VideoFrame> frame;
+ VideoFrame::CreateEmptyFrame(&frame);
+ event_handler_->OnFillBufferCallback(frame);
+ state_ = MftH264Decoder::kStopped;
+ return false;
+ }
+ return true;
+ } else {
+ LOG(ERROR) << "Unhandled error in DoDecode()";
+ state_ = MftH264Decoder::kStopped;
+ event_handler_->OnError();
+ return false;
}
}
- hr = decoder_->ProcessInput(0, sample.get(), 0);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to ProcessInput, hr = " << std::hex << hr;
- return false;
+
+ // We succeeded in getting an output sample.
+ if (use_dxva_) {
+ // For DXVA we didn't provide the sample, i.e. output_sample was NULL.
+ output_sample.Attach(output_data_buffer.pSample);
+ }
+ if (!output_sample.get()) {
+ LOG(ERROR) << "ProcessOutput succeeded, but did not get a sample back";
+ event_handler_->OnError();
+ return true;
}
- frames_read_++;
- next_frame_discontinuous_ = false;
- return true;
-}
-bool MftH264Decoder::SendEndOfStreamMessage() {
- CHECK(initialized_);
- // Send the eos message with no parameters.
- HRESULT hr = decoder_->ProcessMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to send the drain message to decoder";
- return false;
+ int64 timestamp = 0, duration = 0;
+ if (FAILED(output_sample->GetSampleTime(&timestamp)) ||
+ FAILED(output_sample->GetSampleDuration(&duration))) {
+ LOG(WARNING) << "Failed to get timestamp/duration from output";
}
- return true;
-}
-bool MftH264Decoder::SendDrainMessage() {
- CHECK(initialized_);
- if (drain_message_sent_) {
- LOG(ERROR) << "Drain message was already sent before!";
- return false;
+ // The duration and timestamps are in 100-ns units, so divide by 10
+ // to convert to microseconds.
+ timestamp /= 10;
+ duration /= 10;
+
+ // Sanity checks for checking if there is really something in the sample.
+ DWORD buf_count;
+ hr = output_sample->GetBufferCount(&buf_count);
+ if (FAILED(hr) || buf_count != 1) {
+ LOG(ERROR) << "Failed to get buffer count, or buffer count mismatch";
+ return true;
}
- // Send the drain message with no parameters.
- HRESULT hr = decoder_->ProcessMessage(MFT_MESSAGE_COMMAND_DRAIN, NULL);
+ ScopedComPtr<IMFMediaBuffer> output_buffer;
+ hr = output_sample->GetBufferByIndex(0, output_buffer.Receive());
if (FAILED(hr)) {
- LOG(ERROR) << "Failed to send the drain message to decoder";
- return false;
+ LOG(ERROR) << "Failed to get buffer from sample";
+ return true;
+ }
+
+ VideoFrame::CreateFrame(info_.stream_info_.surface_format_,
+ info_.stream_info_.surface_width_,
+ info_.stream_info_.surface_height_,
+ base::TimeDelta::FromMicroseconds(timestamp),
+ base::TimeDelta::FromMicroseconds(duration),
+ &frame);
+ if (!frame.get()) {
+ LOG(ERROR) << "Failed to allocate video frame";
+ event_handler_->OnError();
+ return true;
}
- drain_message_sent_ = true;
+
+ if (use_dxva_) {
+ // temporary until we figure out how to send a D3D9 surface handle.
+ ScopedComPtr<IDirect3DSurface9> surface;
+ hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
+ IID_PPV_ARGS(surface.Receive()));
+ if (FAILED(hr))
+ return true;
+
+ // TODO(imcheng):
+ // This is causing some problems (LockRect does not work always).
+ // We won't need this when we figure out how to use the d3d
+ // surface directly.
+ // NV12 to YV12
+ D3DLOCKED_RECT d3dlocked_rect;
+ hr = surface->LockRect(&d3dlocked_rect, NULL, D3DLOCK_READONLY);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "LockRect";
+ return true;
+ }
+ D3DSURFACE_DESC desc;
+ hr = surface->GetDesc(&desc);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "GetDesc";
+ CHECK(SUCCEEDED(surface->UnlockRect()));
+ return true;
+ }
+
+ uint32 src_stride = d3dlocked_rect.Pitch;
+ uint32 dst_stride = config_.width_;
+ uint8* src_y = static_cast<uint8*>(d3dlocked_rect.pBits);
+ uint8* src_uv = src_y + src_stride * desc.Height;
+ uint8* dst_y = static_cast<uint8*>(frame->data(VideoFrame::kYPlane));
+ uint8* dst_u = static_cast<uint8*>(frame->data(VideoFrame::kVPlane));
+ uint8* dst_v = static_cast<uint8*>(frame->data(VideoFrame::kUPlane));
+
+ for (int y = 0; y < config_.height_; ++y) {
+ for (int x = 0; x < config_.width_; ++x) {
+ dst_y[x] = src_y[x];
+ if (!(y & 1)) {
+ if (x & 1)
+ dst_v[x>>1] = src_uv[x];
+ else
+ dst_u[x>>1] = src_uv[x];
+ }
+ }
+ dst_y += dst_stride;
+ src_y += src_stride;
+ if (!(y & 1)) {
+ src_uv += src_stride;
+ dst_v += dst_stride >> 1;
+ dst_u += dst_stride >> 1;
+ }
+ }
+ CHECK(SUCCEEDED(surface->UnlockRect()));
+ } else {
+ // Not DXVA.
+ uint8* src_y;
+ DWORD max_length, current_length;
+ HRESULT hr = output_buffer->Lock(&src_y, &max_length, &current_length);
+ if (FAILED(hr))
+ return true;
+ uint8* dst_y = static_cast<uint8*>(frame->data(VideoFrame::kYPlane));
+
+ memcpy(dst_y, src_y, current_length);
+ CHECK(SUCCEEDED(output_buffer->Unlock()));
+ }
+ // TODO(jiesun): non-System memory case
+ event_handler_->OnFillBufferCallback(frame);
return true;
}
} // namespace media
+
+#endif // defined(OS_WIN)
diff --git a/media/mf/mft_h264_decoder.h b/media/mf/mft_h264_decoder.h
index 355cb4c..bcb45c4 100644
--- a/media/mf/mft_h264_decoder.h
+++ b/media/mf/mft_h264_decoder.h
@@ -2,146 +2,96 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Decodes H.264 Annex B streams using the Media Foundation H.264 decoder as
-// a standalone Media Foundation Transform (MFT).
-// Note: A single MftH264Decoder instance is only for 1 H.264 video stream only.
-// Inputting streams consisting of more than 1 video to a single instance
-// may result in undefined behavior.
+// MFT H.264 decoder.
#ifndef MEDIA_MF_MFT_H264_DECODER_H_
#define MEDIA_MF_MFT_H264_DECODER_H_
-#include <string>
+#include "build/build_config.h" // For OS_WIN.
+#if defined(OS_WIN)
+
+#include <deque>
+
+#include <d3d9.h>
+#include <dxva2api.h>
#include <mfidl.h>
-#include "base/basictypes.h"
-#include "base/callback.h"
#include "base/gtest_prod_util.h"
-#include "base/scoped_ptr.h"
+#include "base/scoped_comptr_win.h"
+#include "media/filters/video_decode_engine.h"
-struct IDirect3DDeviceManager9;
-struct IMFTransform;
+class MessageLoop;
namespace media {
-class DataBuffer;
-class VideoFrame;
-
-// A decoder that takes samples of Annex B streams then outputs decoded frames.
-class MftH264Decoder : public base::RefCountedThreadSafe<MftH264Decoder> {
+class MftH264Decoder : public media::VideoDecodeEngine {
public:
- enum Error {
- kResetOutputStreamFailed = 0,
- kNoMoreOutput,
- kUnspecifiedError,
- kNoMemory,
- kOutputSampleError
- };
- typedef Callback1<scoped_refptr<DataBuffer>*>::Type
- ReadInputCallback;
- typedef Callback1<scoped_refptr<VideoFrame> >::Type OutputReadyCallback;
- typedef Callback1<Error>::Type OutputErrorCallback;
+ typedef enum {
+ kUninitialized, // un-initialized.
+ kNormal, // normal playing state.
+ kFlushing, // upon received Flush(), before FlushDone()
+ kEosDrain, // upon input EOS received.
+ kStopped, // upon output EOS received.
+ } State;
explicit MftH264Decoder(bool use_dxva);
~MftH264Decoder();
+ virtual void Initialize(MessageLoop* message_loop,
+ media::VideoDecodeEngine::EventHandler* event_handler,
+ const VideoCodecConfig& config);
+ virtual void Uninitialize();
+ virtual void Flush();
+ virtual void Seek();
+ virtual void EmptyThisBuffer(scoped_refptr<Buffer> buffer);
+ virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame);
- // Initializes the decoder. |dev_manager| is not required if the decoder does
- // not use DXVA.
- // If the other arguments are not known, leave them as 0. They can be
- // provided to the decoder to try to avoid an initial output format change,
- // but it is not necessary to have them.
- // The object takes ownership of the callbacks. However, the caller must
- // make sure the objects associated with the callbacks outlives the time
- // when GetOutput() will be called.
- bool Init(IDirect3DDeviceManager9* dev_manager,
- int frame_rate_num, int frame_rate_denom,
- int width, int height,
- int aspect_num, int aspect_denom,
- ReadInputCallback* read_input_cb,
- OutputReadyCallback* output_avail_cb,
- OutputErrorCallback* output_error_cb);
-
- // Tries to get an output sample from the decoder, and if successful, calls
- // the callback with the sample, or status of the decoder if an error
- // occurred.
- void GetOutput();
- bool Flush();
-
- bool initialized() const { return initialized_; }
bool use_dxva() const { return use_dxva_; }
- bool drain_message_sent() const { return drain_message_sent_; }
- int in_buffer_size() const { return in_buffer_size_; }
- int out_buffer_size() const { return out_buffer_size_; }
- int frames_read() const { return frames_read_; }
- int frames_decoded() const { return frames_decoded_; }
- int width() const { return width_; }
- int height() const { return height_; }
+ State state() const { return state_; }
private:
friend class MftH264DecoderTest;
- FRIEND_TEST_ALL_PREFIXES(MftH264DecoderTest,
- SendDrainMessageBeforeInitDeathTest);
- FRIEND_TEST_ALL_PREFIXES(MftH264DecoderTest, SendDrainMessageAtInit);
- FRIEND_TEST_ALL_PREFIXES(MftH264DecoderTest, DrainOnEndOfInputStream);
- FRIEND_TEST_ALL_PREFIXES(MftH264DecoderTest, NoOutputOnGarbageInput);
-
- bool InitComMfLibraries();
- bool InitDecoder(IDirect3DDeviceManager9* dev_manager,
- int frame_rate_num, int frame_rate_denom,
- int width, int height,
- int aspect_num, int aspect_denom);
- bool SetDecoderD3d9Manager(IDirect3DDeviceManager9* dev_manager);
- bool SetDecoderMediaTypes(int frame_rate_num, int frame_rate_denom,
- int width, int height,
- int aspect_num, int aspect_denom);
- bool SetDecoderInputMediaType(int frame_rate_num, int frame_rate_denom,
- int width, int height,
- int aspect_num, int aspect_denom);
+ FRIEND_TEST_ALL_PREFIXES(MftH264DecoderTest, LibraryInit);
+
+ // TODO(jiesun): Find a way to move all these to GpuVideoService..
+ static bool StartupComLibraries();
+ static void ShutdownComLibraries();
+ bool CreateD3DDevManager();
+
+ bool InitInternal();
+ bool InitDecoder();
+ bool CheckDecoderDxvaSupport();
+ bool SetDecoderMediaTypes();
+ bool SetDecoderInputMediaType();
bool SetDecoderOutputMediaType(const GUID subtype);
- bool SendStartMessage();
+ bool SendMFTMessage(MFT_MESSAGE_TYPE msg);
bool GetStreamsInfoAndBufferReqs();
- bool ReadInput();
-
- // Sends an Annex B stream to the decoder. The times here should be given
- // in 100ns units. This creates a IMFSample, copies the stream over to the
- // sample, and sends the sample to the decoder.
- // Returns: true if the sample was sent successfully.
- bool SendInput(const uint8* data, int size, int64 timestamp, int64 duration);
-
- bool SendEndOfStreamMessage();
-
- // Sends a drain message to the decoder to indicate no more input will be
- // sent. SendInput() should not be called after calling this method.
- // Returns: true if the drain message was sent successfully.
- bool SendDrainMessage();
-
- // |output_error_callback_| should stop the message loop.
- scoped_ptr<ReadInputCallback> read_input_callback_;
- scoped_ptr<OutputReadyCallback> output_avail_callback_;
- scoped_ptr<OutputErrorCallback> output_error_callback_;
- IMFTransform* decoder_;
- bool initialized_;
+
+ bool DoDecode();
+
+
bool use_dxva_;
- bool drain_message_sent_;
- bool next_frame_discontinuous_;
-
- // Minimum input and output buffer sizes/alignment required by the decoder.
- // If |buffer_alignment_| is zero, then the buffer needs not be aligned.
- int in_buffer_size_;
- int in_buffer_alignment_;
- int out_buffer_size_;
- int out_buffer_alignment_;
- int frames_read_;
- int frames_decoded_;
- int width_;
- int height_;
- int stride_;
- const GUID output_format_;
+
+ ScopedComPtr<IDirect3D9> d3d9_;
+ ScopedComPtr<IDirect3DDevice9> device_;
+ ScopedComPtr<IDirect3DDeviceManager9> device_manager_;
+ HWND device_window_;
+ ScopedComPtr<IMFTransform> decoder_;
+
+ MFT_INPUT_STREAM_INFO input_stream_info_;
+ MFT_OUTPUT_STREAM_INFO output_stream_info_;
+
+ State state_;
+
+ VideoDecodeEngine::EventHandler* event_handler_;
+ VideoCodecConfig config_;
+ VideoCodecInfo info_;
DISALLOW_COPY_AND_ASSIGN(MftH264Decoder);
};
} // namespace media
+#endif // defined(OS_WIN)
+
#endif // MEDIA_MF_MFT_H264_DECODER_H_
diff --git a/media/mf/mft_h264_decoder_example.cc b/media/mf/mft_h264_decoder_example.cc
index 6292957..5b66967 100644
--- a/media/mf/mft_h264_decoder_example.cc
+++ b/media/mf/mft_h264_decoder_example.cc
@@ -19,24 +19,27 @@
#include "base/scoped_comptr_win.h"
#include "base/scoped_ptr.h"
#include "base/time.h"
+#include "media/base/data_buffer.h"
#include "media/base/media.h"
#include "media/base/video_frame.h"
+#include "media/base/yuv_convert.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/ffmpeg/file_protocol.h"
-#include "media/mf/basic_renderer.h"
-#include "media/mf/d3d_util.h"
#include "media/mf/file_reader_util.h"
#include "media/mf/mft_h264_decoder.h"
using base::AtExitManager;
using base::Time;
using base::TimeDelta;
-using media::BasicRenderer;
-using media::NullRenderer;
+using media::Buffer;
+using media::DataBuffer;
using media::FFmpegFileReader;
using media::MftH264Decoder;
-using media::MftRenderer;
+using media::VideoCodecConfig;
+using media::VideoCodecInfo;
+using media::VideoDecodeEngine;
using media::VideoFrame;
+using media::VideoStreamInfo;
namespace {
@@ -64,16 +67,6 @@ static bool InitFFmpeg() {
return true;
}
-bool InitComLibrary() {
- HRESULT hr;
- hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
- if (FAILED(hr)) {
- LOG(ERROR) << "CoInit fail";
- return false;
- }
- return true;
-}
-
// Creates a window with the given width and height.
// Returns: A handle to the window on success, NULL otherwise.
static HWND CreateDrawWindow(int width, int height) {
@@ -103,6 +96,14 @@ static HWND CreateDrawWindow(int width, int height) {
LOG(ERROR) << "Failed to create window";
return NULL;
}
+ RECT rect;
+ rect.left = 0;
+ rect.right = width;
+ rect.top = 0;
+ rect.bottom = height;
+ AdjustWindowRect(&rect, kWindowStyleFlags, FALSE);
+ MoveWindow(window, 0, 0, rect.right - rect.left, rect.bottom - rect.top,
+ TRUE);
return window;
}
@@ -115,10 +116,8 @@ class WindowObserver : public base::MessagePumpWin::Observer {
virtual void WillProcessMessage(const MSG& msg) {
if (msg.message == WM_CHAR && msg.wParam == ' ') {
- if (!decoder_->Flush()) {
- LOG(ERROR) << "Flush failed";
- }
// Seek forward 5 seconds.
+ decoder_->Flush();
reader_->SeekForward(5000000);
}
}
@@ -131,17 +130,150 @@ class WindowObserver : public base::MessagePumpWin::Observer {
MftH264Decoder* decoder_;
};
-static int Run(bool use_dxva, bool render, const std::string& input_file) {
- // If we are not rendering, we need a window anyway to create a D3D device,
- // so we will just use the desktop window. (?)
- HWND window = GetDesktopWindow();
- if (render) {
- window = CreateDrawWindow(640, 480);
- if (window == NULL) {
- LOG(ERROR) << "Failed to create window";
- return -1;
+class MftH264DecoderHandler
+ : public VideoDecodeEngine::EventHandler,
+ public base::RefCountedThreadSafe<MftH264DecoderHandler> {
+ public:
+ MftH264DecoderHandler() : frames_read_(0), frames_decoded_(0) {
+ memset(&info_, 0, sizeof(info_));
+ }
+ virtual ~MftH264DecoderHandler() {}
+ virtual void OnInitializeComplete(const VideoCodecInfo& info) {
+ info_ = info;
+ }
+ virtual void OnUninitializeComplete() {
+ }
+ virtual void OnFlushComplete() {
+ }
+ virtual void OnSeekComplete() {}
+ virtual void OnError() {}
+ virtual void OnFormatChange(VideoStreamInfo stream_info) {
+ info_.stream_info_ = stream_info;
+ }
+ virtual void OnEmptyBufferCallback(scoped_refptr<Buffer> buffer) {
+ if (reader_ && decoder_.get()) {
+ scoped_refptr<DataBuffer> input;
+ reader_->Read(&input);
+ if (!input->IsEndOfStream())
+ frames_read_++;
+ decoder_->EmptyThisBuffer(input);
+ }
+ }
+ virtual void OnFillBufferCallback(scoped_refptr<VideoFrame> frame) {
+ if (frame.get()) {
+ if (frame->format() != VideoFrame::EMPTY) {
+ frames_decoded_++;
+ }
}
}
+ virtual void SetReader(FFmpegFileReader* reader) {
+ reader_ = reader;
+ }
+ virtual void SetDecoder(scoped_refptr<MftH264Decoder> decoder) {
+ decoder_ = decoder;
+ }
+ virtual void DecodeSingleFrame() {
+ scoped_refptr<VideoFrame> frame;
+ decoder_->FillThisBuffer(frame);
+ }
+ virtual void Start() {
+ while (decoder_->state() != MftH264Decoder::kStopped)
+ DecodeSingleFrame();
+ }
+
+ VideoCodecInfo info_;
+ int frames_read_;
+ int frames_decoded_;
+ FFmpegFileReader* reader_;
+ scoped_refptr<MftH264Decoder> decoder_;
+};
+
+class RenderToWindowHandler : public MftH264DecoderHandler {
+ public:
+ RenderToWindowHandler(HWND window, MessageLoop* loop)
+ : MftH264DecoderHandler(),
+ window_(window),
+ loop_(loop),
+ has_output_(false) {
+ }
+ virtual ~RenderToWindowHandler() {}
+ virtual void OnFillBufferCallback(scoped_refptr<VideoFrame> frame) {
+ has_output_ = true;
+ if (frame.get()) {
+ if (frame->format() != VideoFrame::EMPTY) {
+ frames_decoded_++;
+ loop_->PostDelayedTask(
+ FROM_HERE,
+ NewRunnableMethod(this, &RenderToWindowHandler::DecodeSingleFrame),
+ frame->GetDuration().InMilliseconds());
+
+ int width = frame->width();
+ int height = frame->height();
+
+ // Assume height does not change.
+ static uint8* rgb_frame = new uint8[height * frame->stride(0) * 4];
+ uint8* frame_y = static_cast<uint8*>(frame->data(VideoFrame::kYPlane));
+ uint8* frame_u = static_cast<uint8*>(frame->data(VideoFrame::kUPlane));
+ uint8* frame_v = static_cast<uint8*>(frame->data(VideoFrame::kVPlane));
+ media::ConvertYUVToRGB32(frame_y, frame_v, frame_u, rgb_frame,
+ width, height,
+ frame->stride(0), frame->stride(1),
+ 4 * frame->stride(0), media::YV12);
+ PAINTSTRUCT ps;
+ InvalidateRect(window_, NULL, TRUE);
+ HDC hdc = BeginPaint(window_, &ps);
+ BITMAPINFOHEADER hdr;
+ hdr.biSize = sizeof(BITMAPINFOHEADER);
+ hdr.biWidth = width;
+ hdr.biHeight = -height; // minus means top-down bitmap
+ hdr.biPlanes = 1;
+ hdr.biBitCount = 32;
+ hdr.biCompression = BI_RGB; // no compression
+ hdr.biSizeImage = 0;
+ hdr.biXPelsPerMeter = 1;
+ hdr.biYPelsPerMeter = 1;
+ hdr.biClrUsed = 0;
+ hdr.biClrImportant = 0;
+ int rv = StretchDIBits(hdc, 0, 0, width, height, 0, 0, width, height,
+ rgb_frame, reinterpret_cast<BITMAPINFO*>(&hdr),
+ DIB_RGB_COLORS, SRCCOPY);
+ EndPaint(window_, &ps);
+ if (!rv) {
+ LOG(ERROR) << "StretchDIBits failed";
+ loop_->QuitNow();
+ }
+ } else { // if frame is type EMPTY, there will be no more frames.
+ loop_->QuitNow();
+ }
+ }
+ }
+ virtual void DecodeSingleFrame() {
+ if (decoder_->state() != MftH264Decoder::kStopped) {
+ while (decoder_->state() != MftH264Decoder::kStopped && !has_output_) {
+ scoped_refptr<VideoFrame> frame;
+ decoder_->FillThisBuffer(frame);
+ }
+ if (decoder_->state() == MftH264Decoder::kStopped)
+ loop_->QuitNow();
+ has_output_ = false;
+ } else {
+ loop_->QuitNow();
+ }
+ }
+ virtual void Start() {
+ loop_->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(this, &RenderToWindowHandler::DecodeSingleFrame));
+ loop_->Run();
+ }
+
+ private:
+ HWND window_;
+ MessageLoop* loop_;
+ bool has_output_;
+};
+
+static int Run(bool use_dxva, bool render, const std::string& input_file) {
scoped_ptr<FFmpegFileReader> reader(new FFmpegFileReader(input_file));
if (reader.get() == NULL || !reader->Initialize()) {
LOG(ERROR) << "Failed to create/initialize reader";
@@ -151,86 +283,54 @@ static int Run(bool use_dxva, bool render, const std::string& input_file) {
if (!reader->GetWidth(&width) || !reader->GetHeight(&height)) {
LOG(WARNING) << "Failed to get width/height from reader";
}
- int aspect_ratio_num = 0, aspect_ratio_denom = 0;
- if (!reader->GetAspectRatio(&aspect_ratio_num, &aspect_ratio_denom)) {
- LOG(WARNING) << "Failed to get aspect ratio";
- }
- int frame_rate_num = 0, frame_rate_denom = 0;
- if (!reader->GetFrameRate(&frame_rate_num, &frame_rate_denom)) {
- LOG(WARNING) << "Failed to get frame rate";
- }
- ScopedComPtr<IDirect3D9> d3d9;
- ScopedComPtr<IDirect3DDevice9> device;
- ScopedComPtr<IDirect3DDeviceManager9> dev_manager;
- if (use_dxva) {
- dev_manager.Attach(media::CreateD3DDevManager(window,
- d3d9.Receive(),
- device.Receive()));
- if (dev_manager.get() == NULL) {
- LOG(ERROR) << "Cannot create D3D9 manager";
+ VideoCodecConfig config;
+ config.width_ = width;
+ config.height_ = height;
+ HWND window = NULL;
+ if (render) {
+ window = CreateDrawWindow(width, height);
+ if (window == NULL) {
+ LOG(ERROR) << "Failed to create window";
return -1;
}
}
+
scoped_refptr<MftH264Decoder> mft(new MftH264Decoder(use_dxva));
- scoped_refptr<MftRenderer> renderer;
- if (render) {
- renderer = new BasicRenderer(mft.get(), window, device);
- } else {
- renderer = new NullRenderer(mft.get());
- }
- if (mft.get() == NULL) {
- LOG(ERROR) << "Failed to create fake renderer / MFT";
+ if (!mft.get()) {
+ LOG(ERROR) << "Failed to create fake MFT";
return -1;
}
- if (!mft->Init(dev_manager,
- frame_rate_num, frame_rate_denom,
- width, height,
- aspect_ratio_num, aspect_ratio_denom,
- NewCallback(reader.get(), &FFmpegFileReader::Read),
- NewCallback(renderer.get(), &MftRenderer::ProcessFrame),
- NewCallback(renderer.get(),
- &MftRenderer::OnDecodeError))) {
- LOG(ERROR) << "Failed to initialize mft";
+
+ scoped_refptr<MftH264DecoderHandler> handler;
+ if (render)
+ handler = new RenderToWindowHandler(window, MessageLoop::current());
+ else
+ handler = new MftH264DecoderHandler();
+ handler->SetDecoder(mft);
+ handler->SetReader(reader.get());
+ if (!handler.get()) {
+    LOG(ERROR) << "Failed to create handler";
return -1;
}
+
+ mft->Initialize(MessageLoop::current(), handler.get(), config);
scoped_ptr<WindowObserver> observer;
+
// If rendering, resize the window to fit the video frames.
if (render) {
- RECT rect;
- rect.left = 0;
- rect.right = mft->width();
- rect.top = 0;
- rect.bottom = mft->height();
- AdjustWindowRect(&rect, kWindowStyleFlags, FALSE);
- if (!MoveWindow(window, 0, 0, rect.right - rect.left,
- rect.bottom - rect.top, TRUE)) {
- LOG(WARNING) << "Warning: Failed to resize window";
- }
observer.reset(new WindowObserver(reader.get(), mft.get()));
MessageLoopForUI::current()->AddObserver(observer.get());
}
- if (use_dxva) {
- // Reset the device's back buffer dimensions to match the window's
- // dimensions.
- if (!media::AdjustD3DDeviceBackBufferDimensions(device.get(),
- window,
- mft->width(),
- mft->height())) {
- LOG(WARNING) << "Warning: Failed to reset device to have correct "
- << "backbuffer dimension, scaling might occur";
- }
- }
- Time decode_start(Time::Now());
-
- MessageLoopForUI::current()->PostTask(FROM_HERE,
- NewRunnableMethod(renderer.get(), &MftRenderer::StartPlayback));
- MessageLoopForUI::current()->Run(NULL);
+ Time decode_start(Time::Now());
+ handler->Start();
TimeDelta decode_time = Time::Now() - decode_start;
printf("All done, frames read: %d, frames decoded: %d\n",
- mft->frames_read(), mft->frames_decoded());
+ handler->frames_read_, handler->frames_decoded_);
printf("Took %lldms\n", decode_time.InMilliseconds());
+ if (window)
+ DestroyWindow(window);
return 0;
}
@@ -266,10 +366,6 @@ int main(int argc, char** argv) {
LOG(ERROR) << "InitFFMpeg() failed";
return -1;
}
- if (!InitComLibrary()) {
- LOG(ERROR) << "InitComLibraries() failed";
- return -1;
- }
int ret = Run(use_dxva, render, input_file);
printf("Done\n");
diff --git a/media/mf/test/mft_h264_decoder_unittest.cc b/media/mf/test/mft_h264_decoder_unittest.cc
index daf2c66..0e78449 100644
--- a/media/mf/test/mft_h264_decoder_unittest.cc
+++ b/media/mf/test/mft_h264_decoder_unittest.cc
@@ -2,37 +2,42 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <d3d9.h>
-#include <dxva2api.h>
-#include <mfapi.h>
-
#include "base/file_path.h"
#include "base/file_util.h"
#include "base/message_loop.h"
#include "base/path_service.h"
-#include "base/scoped_comptr_win.h"
+#include "base/ref_counted.h"
#include "base/scoped_ptr.h"
#include "base/string_util.h"
+#include "base/time.h"
#include "media/base/data_buffer.h"
#include "media/base/video_frame.h"
-#include "media/mf/d3d_util.h"
+#include "media/filters/video_decode_engine.h"
#include "media/mf/file_reader_util.h"
#include "media/mf/mft_h264_decoder.h"
#include "testing/gtest/include/gtest/gtest.h"
+using base::TimeDelta;
+
namespace media {
static const int kDecoderMaxWidth = 1920;
static const int kDecoderMaxHeight = 1088;
-class FakeMftReader {
+class BaseMftReader : public base::RefCountedThreadSafe<BaseMftReader> {
+ public:
+ virtual ~BaseMftReader() {}
+ virtual void ReadCallback(scoped_refptr<DataBuffer>* input) = 0;
+};
+
+class FakeMftReader : public BaseMftReader {
public:
FakeMftReader() : frames_remaining_(20) {}
explicit FakeMftReader(int count) : frames_remaining_(count) {}
- ~FakeMftReader() {}
+ virtual ~FakeMftReader() {}
// Provides garbage input to the decoder.
- void ReadCallback(scoped_refptr<DataBuffer>* input) {
+ virtual void ReadCallback(scoped_refptr<DataBuffer>* input) {
if (frames_remaining_ > 0) {
int sz = 4096;
uint8* buf = new uint8[sz];
@@ -54,49 +59,34 @@ class FakeMftReader {
int frames_remaining_;
};
-class FakeMftRenderer : public base::RefCountedThreadSafe<FakeMftRenderer> {
+class FFmpegFileReaderWrapper : public BaseMftReader {
public:
- explicit FakeMftRenderer(scoped_refptr<MftH264Decoder> decoder)
- : decoder_(decoder),
- count_(0),
- flush_countdown_(0) {
+ FFmpegFileReaderWrapper() {}
+ virtual ~FFmpegFileReaderWrapper() {}
+ bool InitReader(const std::string& filename) {
+ reader_.reset(new FFmpegFileReader(filename));
+ if (!reader_.get() || !reader_->Initialize()) {
+ reader_.reset();
+ return false;
+ }
+ return true;
}
-
- virtual ~FakeMftRenderer() {}
-
- virtual void WriteCallback(scoped_refptr<VideoFrame> frame) {
- static_cast<IMFMediaBuffer*>(frame->private_buffer())->Release();
- ++count_;
- if (flush_countdown_ > 0) {
- if (--flush_countdown_ == 0) {
- decoder_->Flush();
- }
+ virtual void ReadCallback(scoped_refptr<DataBuffer>* input) {
+ if (reader_.get()) {
+ reader_->Read(input);
}
- MessageLoop::current()->PostTask(
- FROM_HERE,
- NewRunnableMethod(decoder_.get(), &MftH264Decoder::GetOutput));
}
-
- virtual void Start() {
- MessageLoop::current()->PostTask(
- FROM_HERE,
- NewRunnableMethod(decoder_.get(), &MftH264Decoder::GetOutput));
+ bool GetWidth(int* width) {
+ if (!reader_.get())
+ return false;
+ return reader_->GetWidth(width);
}
-
- virtual void OnDecodeError(MftH264Decoder::Error error) {
- MessageLoop::current()->Quit();
+ bool GetHeight(int* height) {
+ if (!reader_.get())
+ return false;
+ return reader_->GetHeight(height);
}
-
- virtual void SetFlushCountdown(int countdown) {
- flush_countdown_ = countdown;
- }
-
- int count() const { return count_; }
-
- protected:
- scoped_refptr<MftH264Decoder> decoder_;
- int count_;
- int flush_countdown_;
+ scoped_ptr<FFmpegFileReader> reader_;
};
class MftH264DecoderTest : public testing::Test {
@@ -109,158 +99,270 @@ class MftH264DecoderTest : public testing::Test {
virtual void TearDown() {}
};
+class SimpleMftH264DecoderHandler : public VideoDecodeEngine::EventHandler {
+ public:
+ SimpleMftH264DecoderHandler()
+ : init_count_(0),
+ uninit_count_(0),
+ flush_count_(0),
+ format_change_count_(0),
+ empty_buffer_callback_count_(0),
+ fill_buffer_callback_count_(0) {
+ memset(&info_, 0, sizeof(info_));
+ }
+ virtual ~SimpleMftH264DecoderHandler() {}
+ virtual void OnInitializeComplete(const VideoCodecInfo& info) {
+ info_ = info;
+ init_count_++;
+ }
+ virtual void OnUninitializeComplete() {
+ uninit_count_++;
+ }
+ virtual void OnFlushComplete() {
+ flush_count_++;
+ }
+ virtual void OnSeekComplete() {}
+ virtual void OnError() {}
+ virtual void OnFormatChange(VideoStreamInfo stream_info) {
+ format_change_count_++;
+ info_.stream_info_ = stream_info;
+ }
+ virtual void OnEmptyBufferCallback(scoped_refptr<Buffer> buffer) {
+ if (reader_.get() && decoder_.get()) {
+ empty_buffer_callback_count_++;
+ scoped_refptr<DataBuffer> input;
+ reader_->ReadCallback(&input);
+ decoder_->EmptyThisBuffer(input);
+ }
+ }
+ virtual void OnFillBufferCallback(scoped_refptr<VideoFrame> frame) {
+ fill_buffer_callback_count_++;
+ current_frame_ = frame;
+ }
+ void SetReader(scoped_refptr<BaseMftReader> reader) {
+ reader_ = reader;
+ }
+ void SetDecoder(scoped_refptr<MftH264Decoder> decoder) {
+ decoder_ = decoder;
+ }
+
+ int init_count_;
+ int uninit_count_;
+ int flush_count_;
+ int format_change_count_;
+ int empty_buffer_callback_count_;
+ int fill_buffer_callback_count_;
+ VideoCodecInfo info_;
+ scoped_refptr<BaseMftReader> reader_;
+ scoped_refptr<MftH264Decoder> decoder_;
+ scoped_refptr<VideoFrame> current_frame_;
+};
+
// A simple test case for init/deinit of MF/COM libraries.
-TEST_F(MftH264DecoderTest, SimpleInit) {
- EXPECT_HRESULT_SUCCEEDED(
- CoInitializeEx(NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE));
- EXPECT_HRESULT_SUCCEEDED(MFStartup(MF_VERSION, MFSTARTUP_FULL));
- EXPECT_HRESULT_SUCCEEDED(MFShutdown());
- CoUninitialize();
+TEST_F(MftH264DecoderTest, LibraryInit) {
+ EXPECT_TRUE(MftH264Decoder::StartupComLibraries());
+ MftH264Decoder::ShutdownComLibraries();
}
-TEST_F(MftH264DecoderTest, InitWithDxvaButNoD3dDevice) {
+TEST_F(MftH264DecoderTest, DecoderUninitializedAtFirst) {
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(true));
- ASSERT_TRUE(decoder.get() != NULL);
- FakeMftReader reader;
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- EXPECT_FALSE(
- decoder->Init(NULL, 6, 7, 111, 222, 3, 1,
- NewCallback(&reader, &FakeMftReader::ReadCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
+ ASSERT_TRUE(decoder.get());
+ EXPECT_EQ(MftH264Decoder::kUninitialized, decoder->state());
}
-TEST_F(MftH264DecoderTest, InitMissingCallbacks) {
+TEST_F(MftH264DecoderTest, DecoderInitMissingArgs) {
+ VideoCodecConfig config;
+ config.width_ = 800;
+ config.height_ = 600;
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
- ASSERT_TRUE(decoder.get() != NULL);
- EXPECT_FALSE(decoder->Init(NULL, 1, 3, 111, 222, 56, 34, NULL, NULL, NULL));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(NULL, NULL, config);
+ EXPECT_EQ(MftH264Decoder::kUninitialized, decoder->state());
}
-TEST_F(MftH264DecoderTest, InitWithNegativeDimensions) {
+TEST_F(MftH264DecoderTest, DecoderInitNoDxva) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = 800;
+ config.height_ = 600;
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
- ASSERT_TRUE(decoder.get() != NULL);
- FakeMftReader reader;
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- EXPECT_TRUE(decoder->Init(NULL, 0, 6, -123, -456, 22, 4787,
- NewCallback(&reader, &FakeMftReader::ReadCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
-
- // By default, decoder should "guess" the dimensions to be the maximum.
- EXPECT_EQ(kDecoderMaxWidth, decoder->width());
- EXPECT_EQ(kDecoderMaxHeight, decoder->height());
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, config);
+ EXPECT_EQ(1, handler.init_count_);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ decoder->Uninitialize();
}
-TEST_F(MftH264DecoderTest, InitWithTooHighDimensions) {
+TEST_F(MftH264DecoderTest, DecoderInitDxva) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = 800;
+ config.height_ = 600;
+ scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(true));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, config);
+ EXPECT_EQ(1, handler.init_count_);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ decoder->Uninitialize();
+}
+
+TEST_F(MftH264DecoderTest, DecoderUninit) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = 800;
+ config.height_ = 600;
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
- ASSERT_TRUE(decoder.get() != NULL);
- FakeMftReader reader;
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- EXPECT_TRUE(decoder->Init(NULL, 0, 0,
- kDecoderMaxWidth + 1, kDecoderMaxHeight + 1,
- 0, 0,
- NewCallback(&reader, &FakeMftReader::ReadCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
-
- // Decoder should truncate the dimensions to the maximum supported.
- EXPECT_EQ(kDecoderMaxWidth, decoder->width());
- EXPECT_EQ(kDecoderMaxHeight, decoder->height());
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ decoder->Uninitialize();
+ EXPECT_EQ(1, handler.uninit_count_);
+ EXPECT_EQ(MftH264Decoder::kUninitialized, decoder->state());
}
-TEST_F(MftH264DecoderTest, InitWithNormalDimensions) {
+TEST_F(MftH264DecoderTest, UninitBeforeInit) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = 800;
+ config.height_ = 600;
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
- ASSERT_TRUE(decoder.get() != NULL);
- FakeMftReader reader;
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- int width = 1024, height = 768;
- EXPECT_TRUE(decoder->Init(NULL, 0, 0, width, height, 0, 0,
- NewCallback(&reader, &FakeMftReader::ReadCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
-
- EXPECT_EQ(width, decoder->width());
- EXPECT_EQ(height, decoder->height());
+ ASSERT_TRUE(decoder.get());
+ decoder->Uninitialize();
+ EXPECT_EQ(0, handler.uninit_count_);
}
-// SendDrainMessage() is not a public method. Nonetheless it does not hurt
-// to test that the decoder should not do things before it is initialized.
-TEST_F(MftH264DecoderTest, SendDrainMessageBeforeInitDeathTest) {
+TEST_F(MftH264DecoderTest, InitWithNegativeDimensions) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = -123;
+ config.height_ = -456;
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
- ASSERT_TRUE(decoder.get() != NULL);
- EXPECT_DEATH({ decoder->SendDrainMessage(); }, ".*initialized_.*");
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info_.surface_width_);
+ EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info_.surface_height_);
+ decoder->Uninitialize();
}
-// Tests draining after init, but before any input is sent.
-TEST_F(MftH264DecoderTest, SendDrainMessageAtInit) {
+TEST_F(MftH264DecoderTest, InitWithTooHighDimensions) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = kDecoderMaxWidth + 1;
+ config.height_ = kDecoderMaxHeight + 1;
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
- ASSERT_TRUE(decoder.get() != NULL);
- FakeMftReader reader;
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- ASSERT_TRUE(decoder->Init(NULL, 0, 0, 111, 222, 0, 0,
- NewCallback(&reader, &FakeMftReader::ReadCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
- EXPECT_TRUE(decoder->SendDrainMessage());
- EXPECT_TRUE(decoder->drain_message_sent_);
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info_.surface_width_);
+ EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info_.surface_height_);
+ decoder->Uninitialize();
}
-TEST_F(MftH264DecoderTest, DrainOnEndOfInputStream) {
+TEST_F(MftH264DecoderTest, DrainOnEmptyBuffer) {
MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = 1024;
+ config.height_ = 768;
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
- ASSERT_TRUE(decoder.get() != NULL);
-
- // No frames, outputs a NULL indicating end-of-stream
- FakeMftReader reader(0);
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- ASSERT_TRUE(decoder->Init(NULL, 0, 0, 111, 222, 0, 0,
- NewCallback(&reader, &FakeMftReader::ReadCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
- MessageLoop::current()->PostTask(
- FROM_HERE,
- NewRunnableMethod(renderer.get(), &FakeMftRenderer::Start));
- MessageLoop::current()->Run();
- EXPECT_TRUE(decoder->drain_message_sent());
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ scoped_refptr<Buffer> buffer(new DataBuffer(0));
+
+  // Decoder should switch to drain mode on this zero-length end-of-stream
+  // buffer, then switch to kStopped once it needs more input while draining.
+ decoder->EmptyThisBuffer(buffer);
+ EXPECT_EQ(MftH264Decoder::kStopped, decoder->state());
+
+ // Should have called back with one empty frame.
+ EXPECT_EQ(1, handler.fill_buffer_callback_count_);
+ ASSERT_TRUE(handler.current_frame_.get());
+ EXPECT_EQ(VideoFrame::EMPTY, handler.current_frame_->format());
+ decoder->Uninitialize();
}
-// 100 input garbage samples should be enough to test whether the decoder
-// will output decoded garbage frames.
TEST_F(MftH264DecoderTest, NoOutputOnGarbageInput) {
+ // 100 samples of garbage.
+ const int kNumFrames = 100;
+ scoped_refptr<FakeMftReader> reader(new FakeMftReader(kNumFrames));
+ ASSERT_TRUE(reader.get());
+
MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = 1024;
+ config.height_ = 768;
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
- ASSERT_TRUE(decoder.get() != NULL);
- int num_frames = 100;
- FakeMftReader reader(num_frames);
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- ASSERT_TRUE(decoder->Init(NULL, 0, 0, 111, 222, 0, 0,
- NewCallback(&reader, &FakeMftReader::ReadCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
- MessageLoop::current()->PostTask(
- FROM_HERE, NewRunnableMethod(renderer.get(), &FakeMftRenderer::Start));
- MessageLoop::current()->Run();
-
- // Decoder should accept corrupt input data and silently ignore it.
- EXPECT_EQ(num_frames, decoder->frames_read());
-
- // Decoder should not have output anything if input is corrupt.
- EXPECT_EQ(0, decoder->frames_decoded());
- EXPECT_EQ(0, renderer->count());
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ handler.SetReader(reader);
+ handler.SetDecoder(decoder);
+ while (MftH264Decoder::kStopped != decoder->state()) {
+ scoped_refptr<VideoFrame> frame;
+ decoder->FillThisBuffer(frame);
+ }
+
+ // Output callback should only be invoked once - the empty frame to indicate
+ // end of stream.
+ EXPECT_EQ(1, handler.fill_buffer_callback_count_);
+ ASSERT_TRUE(handler.current_frame_.get());
+ EXPECT_EQ(VideoFrame::EMPTY, handler.current_frame_->format());
+
+ // One extra count because of the end of stream NULL sample.
+ EXPECT_EQ(kNumFrames, handler.empty_buffer_callback_count_ - 1);
+ decoder->Uninitialize();
+}
+
+TEST_F(MftH264DecoderTest, FlushAtStart) {
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = 1024;
+ config.height_ = 768;
+ scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ decoder->Flush();
+
+ // Flush should succeed even if input/output are empty.
+ EXPECT_EQ(1, handler.flush_count_);
+ decoder->Uninitialize();
+}
+
+TEST_F(MftH264DecoderTest, NoFlushAtStopped) {
+ scoped_refptr<BaseMftReader> reader(new FakeMftReader());
+ ASSERT_TRUE(reader.get());
+
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = 1024;
+ config.height_ = 768;
+ scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ handler.SetReader(reader);
+ handler.SetDecoder(decoder);
+ while (MftH264Decoder::kStopped != decoder->state()) {
+ scoped_refptr<VideoFrame> frame;
+ decoder->FillThisBuffer(frame);
+ }
+ EXPECT_EQ(0, handler.flush_count_);
+ int old_flush_count = handler.flush_count_;
+ decoder->Flush();
+ EXPECT_EQ(old_flush_count, handler.flush_count_);
+ decoder->Uninitialize();
}
FilePath GetVideoFilePath(const std::string& file_name) {
@@ -273,116 +375,49 @@ FilePath GetVideoFilePath(const std::string& file_name) {
return path;
}
-// Decodes media/test/data/bear.1280x720.mp4 which is expected to be a valid
-// H.264 video.
-TEST_F(MftH264DecoderTest, DecodeValidVideoDxva) {
- MessageLoop loop;
- FilePath path = GetVideoFilePath("bear.1280x720.mp4");
+void DecodeValidVideo(const std::string& filename, int num_frames, bool dxva) {
+ scoped_refptr<FFmpegFileReaderWrapper> reader(new FFmpegFileReaderWrapper());
+ ASSERT_TRUE(reader.get());
+ FilePath path = GetVideoFilePath(filename);
ASSERT_TRUE(file_util::PathExists(path));
+ ASSERT_TRUE(reader->InitReader(WideToASCII(path.value())));
+ int actual_width;
+ int actual_height;
+ ASSERT_TRUE(reader->GetWidth(&actual_width));
+ ASSERT_TRUE(reader->GetHeight(&actual_height));
- ScopedComPtr<IDirect3D9> d3d9;
- ScopedComPtr<IDirect3DDevice9> device;
- ScopedComPtr<IDirect3DDeviceManager9> dev_manager;
- dev_manager.Attach(CreateD3DDevManager(GetDesktopWindow(),
- d3d9.Receive(),
- device.Receive()));
- ASSERT_TRUE(dev_manager.get() != NULL);
-
- scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(true));
- ASSERT_TRUE(decoder.get() != NULL);
- scoped_ptr<FFmpegFileReader> reader(
- new FFmpegFileReader(WideToASCII(path.value())));
- ASSERT_TRUE(reader->Initialize());
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- ASSERT_TRUE(decoder->Init(dev_manager.get(), 0, 0, 111, 222, 0, 0,
- NewCallback(reader.get(), &FFmpegFileReader::Read),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
- MessageLoop::current()->PostTask(
- FROM_HERE,
- NewRunnableMethod(renderer.get(), &FakeMftRenderer::Start));
- MessageLoop::current()->Run();
-
- // If the video is valid, then it should output frames. However, for some
- // videos, the number of frames decoded is one-off.
- EXPECT_EQ(82, decoder->frames_read());
- EXPECT_LE(decoder->frames_read() - decoder->frames_decoded(), 1);
-}
+ MessageLoop loop;
+ SimpleMftH264DecoderHandler handler;
+ VideoCodecConfig config;
+ config.width_ = 1;
+ config.height_ = 1;
+ scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(dxva));
+ ASSERT_TRUE(decoder.get());
+ decoder->Initialize(&loop, &handler, config);
+ EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
+ handler.SetReader(reader);
+ handler.SetDecoder(decoder);
+ while (MftH264Decoder::kStopped != decoder->state()) {
+ scoped_refptr<VideoFrame> frame;
+ decoder->FillThisBuffer(frame);
+ }
-TEST_F(MftH264DecoderTest, FlushAtInit) {
- scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
- ASSERT_TRUE(decoder.get() != NULL);
- FakeMftReader reader;
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- ASSERT_TRUE(decoder->Init(NULL, 0, 0, 111, 222, 0, 0,
- NewCallback(&reader, &FakeMftReader::ReadCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
- EXPECT_TRUE(decoder->Flush());
+ // We expect a format change when decoder receives enough data to determine
+ // the actual frame width/height.
+ EXPECT_GT(handler.format_change_count_, 0);
+ EXPECT_EQ(actual_width, handler.info_.stream_info_.surface_width_);
+ EXPECT_EQ(actual_height, handler.info_.stream_info_.surface_height_);
+ EXPECT_GE(handler.empty_buffer_callback_count_, num_frames);
+ EXPECT_EQ(num_frames, handler.fill_buffer_callback_count_ - 1);
+ decoder->Uninitialize();
}
-TEST_F(MftH264DecoderTest, FlushAtEnd) {
- MessageLoop loop;
- scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
- ASSERT_TRUE(decoder.get() != NULL);
-
- // No frames, outputs a NULL indicating end-of-stream
- FakeMftReader reader(0);
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- ASSERT_TRUE(decoder->Init(NULL, 0, 0, 111, 222, 0, 0,
- NewCallback(&reader, &FakeMftReader::ReadCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
- MessageLoop::current()->PostTask(
- FROM_HERE,
- NewRunnableMethod(renderer.get(), &FakeMftRenderer::Start));
- MessageLoop::current()->Run();
- EXPECT_TRUE(decoder->Flush());
+TEST_F(MftH264DecoderTest, DecodeValidVideoDxva) {
+ DecodeValidVideo("bear.1280x720.mp4", 82, true);
}
-TEST_F(MftH264DecoderTest, FlushAtMiddle) {
- MessageLoop loop;
- FilePath path = GetVideoFilePath("bear.1280x720.mp4");
- ASSERT_TRUE(file_util::PathExists(path));
-
- ScopedComPtr<IDirect3D9> d3d9;
- ScopedComPtr<IDirect3DDevice9> device;
- ScopedComPtr<IDirect3DDeviceManager9> dev_manager;
- dev_manager.Attach(CreateD3DDevManager(GetDesktopWindow(),
- d3d9.Receive(),
- device.Receive()));
- ASSERT_TRUE(dev_manager.get() != NULL);
-
- scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(true));
- ASSERT_TRUE(decoder.get() != NULL);
- scoped_ptr<FFmpegFileReader> reader(
- new FFmpegFileReader(WideToASCII(path.value())));
- ASSERT_TRUE(reader->Initialize());
- scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
- ASSERT_TRUE(renderer.get());
-
- // Flush after obtaining 40 decode frames. There are no more key frames after
- // the first one, so we expect it to stop outputting frames after flush.
- int flush_at_nth_decoded_frame = 40;
- renderer->SetFlushCountdown(flush_at_nth_decoded_frame);
- ASSERT_TRUE(decoder->Init(dev_manager.get(), 0, 0, 111, 222, 0, 0,
- NewCallback(reader.get(), &FFmpegFileReader::Read),
- NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback),
- NewCallback(renderer.get(),
- &FakeMftRenderer::OnDecodeError)));
- MessageLoop::current()->PostTask(
- FROM_HERE,
- NewRunnableMethod(renderer.get(), &FakeMftRenderer::Start));
- MessageLoop::current()->Run();
- EXPECT_EQ(82, decoder->frames_read());
- EXPECT_EQ(decoder->frames_decoded(), flush_at_nth_decoded_frame);
+TEST_F(MftH264DecoderTest, DecodeValidVideoNoDxva) {
+ DecodeValidVideo("bear.1280x720.mp4", 82, false);
}
} // namespace media
diff --git a/media/mf/test/run_all_unittests.cc b/media/mf/test/run_all_unittests.cc
index 76d9182..4126108 100644
--- a/media/mf/test/run_all_unittests.cc
+++ b/media/mf/test/run_all_unittests.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <cstdio>
-
#include "base/file_path.h"
#include "base/test/test_suite.h"
#include "media/base/media.h"