author     hclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-09-17 12:49:26 +0000
committer  hclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-09-17 12:49:26 +0000
commit     e65efc1a7a29d16b3baa0839ecc88c26663d88af (patch)
tree       2075dbaf6e60fc1f2eaf4c35459b9366e104fdb9 /media
parent     9b98f084607bc1e9185be7f29b52f1e4c0fa498d (diff)
download   chromium_src-e65efc1a7a29d16b3baa0839ecc88c26663d88af.zip
chromium_src-e65efc1a7a29d16b3baa0839ecc88c26663d88af.tar.gz
chromium_src-e65efc1a7a29d16b3baa0839ecc88c26663d88af.tar.bz2
Implement FakeGlVideoDecodeEngine using FakeGlVideoDecodeContext
Defines UploadToVideoFrame in VideoDecodeContext. FakeGlVideoDecodeEngine now uses FakeGlVideoDecodeContext for video frame allocation and uploading.

BUG=53714
TEST=Tree is green

Review URL: http://codereview.chromium.org/3312022

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@59785 0039d316-1c4b-4281-b951-d872f2087c98
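The interface change itself is mechanical: VideoDecodeEngine::Initialize() gains a VideoDecodeContext* parameter, and call sites that do not yet have a context pass NULL. The following self-contained sketch shows only that shape; the stand-in types and the LoggingDecodeEngine class are illustrative and are not part of the Chromium tree.

// Illustrative sketch only (not Chromium code): simplified stand-in types to
// show the shape of the new four-argument Initialize().
#include <cstdio>

class MessageLoop;           // opaque stand-in for the real MessageLoop
class VideoDecodeContext;    // the new collaborator introduced by this change
struct VideoCodecConfig { int width = 0; int height = 0; };

class VideoDecodeEngine {
 public:
  class EventHandler {};
  virtual ~VideoDecodeEngine() {}
  // The third parameter is new; call sites without a context pass NULL.
  virtual void Initialize(MessageLoop* message_loop,
                          EventHandler* event_handler,
                          VideoDecodeContext* context,
                          const VideoCodecConfig& config) = 0;
};

// Toy engine that only reports what it was given.
class LoggingDecodeEngine : public VideoDecodeEngine {
 public:
  void Initialize(MessageLoop*, EventHandler*, VideoDecodeContext* context,
                  const VideoCodecConfig& config) override {
    std::printf("Initialize: %dx%d, context %s\n", config.width, config.height,
                context ? "provided" : "NULL");
  }
};

int main() {
  LoggingDecodeEngine engine;
  engine.Initialize(nullptr, nullptr, nullptr, VideoCodecConfig{320, 240});
  return 0;
}

In the diffs below, FFmpegVideoDecoder, OmxVideoDecoder, the MFT decoder tests, and omx_test all pass NULL for the new parameter while the VideoDecodeContext refactoring is in progress.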
Diffstat (limited to 'media')
-rw-r--r--  media/filters/ffmpeg_video_decoder.cc | 2
-rw-r--r--  media/filters/ffmpeg_video_decoder_unittest.cc | 8
-rw-r--r--  media/filters/omx_video_decoder.cc | 2
-rw-r--r--  media/mf/mft_h264_decoder.cc | 1
-rw-r--r--  media/mf/mft_h264_decoder.h | 3
-rw-r--r--  media/mf/mft_h264_decoder_example.cc | 6
-rw-r--r--  media/mf/test/mft_h264_decoder_unittest.cc | 22
-rw-r--r--  media/tools/omx_test/omx_test.cc | 2
-rw-r--r--  media/video/ffmpeg_video_decode_engine.cc | 1
-rw-r--r--  media/video/ffmpeg_video_decode_engine.h | 1
-rw-r--r--  media/video/ffmpeg_video_decode_engine_unittest.cc | 8
-rw-r--r--  media/video/omx_video_decode_engine.cc | 1
-rw-r--r--  media/video/omx_video_decode_engine.h | 1
-rw-r--r--  media/video/video_decode_context.h | 28
-rw-r--r--  media/video/video_decode_engine.h | 17
15 files changed, 70 insertions, 33 deletions
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 6faa410e..c79f679 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -95,7 +95,7 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
config.opaque_context = av_stream;
config.width = width_;
config.height = height_;
- decode_engine_->Initialize(message_loop(), this, config);
+ decode_engine_->Initialize(message_loop(), this, NULL, config);
}
void FFmpegVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc
index 6f2eb95..6ed176f 100644
--- a/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -18,6 +18,7 @@
#include "media/filters/ffmpeg_interfaces.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/video/video_decode_engine.h"
+#include "media/video/video_decode_context.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
@@ -50,8 +51,9 @@ class MockFFmpegDemuxerStream : public MockDemuxerStream,
// TODO(hclam): Share this in a separate file.
class MockVideoDecodeEngine : public VideoDecodeEngine {
public:
- MOCK_METHOD3(Initialize, void(MessageLoop* message_loop,
+ MOCK_METHOD4(Initialize, void(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config));
MOCK_METHOD1(ConsumeVideoSample, void(scoped_refptr<Buffer> buffer));
MOCK_METHOD1(ProduceVideoFrame, void(scoped_refptr<VideoFrame> buffer));
@@ -182,7 +184,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
EXPECT_CALL(*demuxer_, GetAVStream())
.WillOnce(Return(&stream_));
- EXPECT_CALL(*engine_, Initialize(_, _, _))
+ EXPECT_CALL(*engine_, Initialize(_, _, _, _))
.WillOnce(EngineInitialize(engine_, true));
EXPECT_CALL(callback_, OnFilterCallback());
@@ -264,7 +266,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_EngineFails) {
EXPECT_CALL(*demuxer_, GetAVStream())
.WillOnce(Return(&stream_));
- EXPECT_CALL(*engine_, Initialize(_, _, _))
+ EXPECT_CALL(*engine_, Initialize(_, _, _, _))
.WillOnce(EngineInitialize(engine_, false));
EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE));
diff --git a/media/filters/omx_video_decoder.cc b/media/filters/omx_video_decoder.cc
index 80a5a13..82793de 100644
--- a/media/filters/omx_video_decoder.cc
+++ b/media/filters/omx_video_decoder.cc
@@ -110,7 +110,7 @@ void OmxVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
config.opaque_context = NULL;
config.width = width_;
config.height = height_;
- omx_engine_->Initialize(message_loop(), this, config);
+ omx_engine_->Initialize(message_loop(), this, NULL, config);
}
void OmxVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
diff --git a/media/mf/mft_h264_decoder.cc b/media/mf/mft_h264_decoder.cc
index 6d85f70..e8d6b05 100644
--- a/media/mf/mft_h264_decoder.cc
+++ b/media/mf/mft_h264_decoder.cc
@@ -170,6 +170,7 @@ MftH264Decoder::~MftH264Decoder() {
void MftH264Decoder::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) {
LOG(INFO) << "MftH264Decoder::Initialize";
if (state_ != kUninitialized) {
diff --git a/media/mf/mft_h264_decoder.h b/media/mf/mft_h264_decoder.h
index 61e3c65..57c9e9f 100644
--- a/media/mf/mft_h264_decoder.h
+++ b/media/mf/mft_h264_decoder.h
@@ -36,7 +36,8 @@ class MftH264Decoder : public media::VideoDecodeEngine {
explicit MftH264Decoder(bool use_dxva, HWND draw_window);
~MftH264Decoder();
virtual void Initialize(MessageLoop* message_loop,
- media::VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void Uninitialize();
virtual void Flush();
diff --git a/media/mf/mft_h264_decoder_example.cc b/media/mf/mft_h264_decoder_example.cc
index 788a2ca..0ed9553 100644
--- a/media/mf/mft_h264_decoder_example.cc
+++ b/media/mf/mft_h264_decoder_example.cc
@@ -362,11 +362,7 @@ static int Run(bool use_dxva, bool render, const std::string& input_file) {
return -1;
}
- mft->Initialize(MessageLoop::current(), handler.get(), config);
- if (!handler->info_.success) {
- LOG(ERROR) << "Failed to initialize decoder";
- return -1;
- }
+ mft->Initialize(MessageLoop::current(), handler.get(), NULL, config);
scoped_ptr<WindowObserver> observer;
if (render) {
observer.reset(new WindowObserver(reader.get(), mft.get()));
diff --git a/media/mf/test/mft_h264_decoder_unittest.cc b/media/mf/test/mft_h264_decoder_unittest.cc
index 67dc07c..11959f7 100644
--- a/media/mf/test/mft_h264_decoder_unittest.cc
+++ b/media/mf/test/mft_h264_decoder_unittest.cc
@@ -207,7 +207,7 @@ TEST_F(MftH264DecoderTest, DecoderInitMissingArgs) {
config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(NULL, NULL, config);
+ decoder->Initialize(NULL, NULL, NULL, config);
EXPECT_EQ(MftH264Decoder::kUninitialized, decoder->state());
}
@@ -219,7 +219,7 @@ TEST_F(MftH264DecoderTest, DecoderInitNoDxva) {
config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(1, handler.init_count_);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Uninitialize();
@@ -235,7 +235,7 @@ TEST_F(MftH264DecoderTest, DecoderInitDxva) {
ASSERT_TRUE(hwnd);
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(true, hwnd));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(1, handler.init_count_);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Uninitialize();
@@ -250,7 +250,7 @@ TEST_F(MftH264DecoderTest, DecoderUninit) {
config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Uninitialize();
EXPECT_EQ(1, handler.uninit_count_);
@@ -277,7 +277,7 @@ TEST_F(MftH264DecoderTest, InitWithNegativeDimensions) {
config.height = -456;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info.surface_width);
EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info.surface_height);
@@ -292,7 +292,7 @@ TEST_F(MftH264DecoderTest, InitWithTooHighDimensions) {
config.height = kDecoderMaxHeight + 1;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info.surface_width);
EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info.surface_height);
@@ -307,7 +307,7 @@ TEST_F(MftH264DecoderTest, DrainOnEmptyBuffer) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
scoped_refptr<Buffer> buffer(new DataBuffer(0));
@@ -336,7 +336,7 @@ TEST_F(MftH264DecoderTest, NoOutputOnGarbageInput) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
handler.SetReader(reader);
handler.SetDecoder(decoder.get());
@@ -364,7 +364,7 @@ TEST_F(MftH264DecoderTest, FlushAtStart) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Flush();
@@ -384,7 +384,7 @@ TEST_F(MftH264DecoderTest, NoFlushAtStopped) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
handler.SetReader(reader);
handler.SetDecoder(decoder.get());
@@ -429,7 +429,7 @@ void DecodeValidVideo(const std::string& filename, int num_frames, bool dxva) {
ASSERT_TRUE(hwnd);
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(dxva, hwnd));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, config);
+ decoder->Initialize(&loop, &handler, NULL, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
handler.SetReader(reader);
handler.SetDecoder(decoder.get());
diff --git a/media/tools/omx_test/omx_test.cc b/media/tools/omx_test/omx_test.cc
index 44b728f..505f060 100644
--- a/media/tools/omx_test/omx_test.cc
+++ b/media/tools/omx_test/omx_test.cc
@@ -189,7 +189,7 @@ class TestApp : public base::RefCountedThreadSafe<TestApp>,
config.width = av_stream_->codec->width;
config.height = av_stream_->codec->height;
engine_.reset(new OmxVideoDecodeEngine());
- engine_->Initialize(&message_loop_, this, config);
+ engine_->Initialize(&message_loop_, this, NULL, config);
// Execute the message loop so that we can run tasks on it. This call
// will return when we call message_loop_.Quit().
diff --git a/media/video/ffmpeg_video_decode_engine.cc b/media/video/ffmpeg_video_decode_engine.cc
index 75be752..7a8181b 100644
--- a/media/video/ffmpeg_video_decode_engine.cc
+++ b/media/video/ffmpeg_video_decode_engine.cc
@@ -35,6 +35,7 @@ FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() {
void FFmpegVideoDecodeEngine::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) {
allocator_.reset(new FFmpegVideoAllocator());
diff --git a/media/video/ffmpeg_video_decode_engine.h b/media/video/ffmpeg_video_decode_engine.h
index bc1e033..a7ce4e4 100644
--- a/media/video/ffmpeg_video_decode_engine.h
+++ b/media/video/ffmpeg_video_decode_engine.h
@@ -28,6 +28,7 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
// Implementation of the VideoDecodeEngine Interface.
virtual void Initialize(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
diff --git a/media/video/ffmpeg_video_decode_engine_unittest.cc b/media/video/ffmpeg_video_decode_engine_unittest.cc
index 7db2f55..9f737ba 100644
--- a/media/video/ffmpeg_video_decode_engine_unittest.cc
+++ b/media/video/ffmpeg_video_decode_engine_unittest.cc
@@ -91,7 +91,7 @@ class FFmpegVideoDecodeEngineTest : public testing::Test,
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_TRUE(info_.success);
}
@@ -143,7 +143,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_FindDecoderFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_FALSE(info_.success);
}
@@ -165,7 +165,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_InitThreadFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_FALSE(info_.success);
}
@@ -188,7 +188,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_FALSE(info_.success);
}
diff --git a/media/video/omx_video_decode_engine.cc b/media/video/omx_video_decode_engine.cc
index e1bcc7f..0df0f1e 100644
--- a/media/video/omx_video_decode_engine.cc
+++ b/media/video/omx_video_decode_engine.cc
@@ -82,6 +82,7 @@ static void ResetParamHeader(const OmxVideoDecodeEngine& dec, T* param) {
void OmxVideoDecodeEngine::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) {
DCHECK_EQ(message_loop, MessageLoop::current());
diff --git a/media/video/omx_video_decode_engine.h b/media/video/omx_video_decode_engine.h
index 8347eed..c5b3882 100644
--- a/media/video/omx_video_decode_engine.h
+++ b/media/video/omx_video_decode_engine.h
@@ -28,6 +28,7 @@ class OmxVideoDecodeEngine : public VideoDecodeEngine {
// Implementation of the VideoDecodeEngine Interface.
virtual void Initialize(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
diff --git a/media/video/video_decode_context.h b/media/video/video_decode_context.h
index 795b136..f768a0a 100644
--- a/media/video/video_decode_context.h
+++ b/media/video/video_decode_context.h
@@ -20,6 +20,13 @@ class VideoFrame;
// 1. Provides access to hardware video decoding device.
// 2. Allocate VideoFrame objects that are used to carry the decoded video
// frames.
+// 3. Upload a device specific buffer to some common VideoFrame storage types.
+// In many cases a VideoDecodeEngine provides its own buffers; these buffers
+// are usually device specific and need a conversion step. Instead of
+// handling all these cases in the renderer, a VideoDecodeContext is used
+// to convert the device specific buffer to a common storage format, e.g.
+// GL textures or system memory. This way the device specific code stays
+// in the VideoDecodeEngine and VideoDecodeContext pair.
class VideoDecodeContext {
public:
virtual ~VideoDecodeContext() {};
@@ -45,11 +52,30 @@ class VideoDecodeContext {
// could be destroyed.
virtual void ReleaseAllVideoFrames() = 0;
+ // Upload a device specific buffer to a video frame. The video frame was
+ // allocated via AllocateVideoFrames().
+ // This method is used if a VideoDecodeEngine cannot write directly to a
+ // VideoFrame, e.g. the upload should happen on a different thread or the
+ // subsystem requires special treatment to generate a VideoFrame. The goal is
+ // to keep VideoDecodeEngine a reusable component and to adapt to different
+ // systems by having a different VideoDecodeContext.
+ //
+ // |frame| is a VideoFrame allocated via AllocateVideoFrames().
+ //
+ // |buffer| is of type void*; it points to an internal data structure of the
+ // VideoDecodeEngine that contains the video frame.
+ // The implementor should know how to handle it.
+ //
+ // |task| is executed if the operation was completed successfully.
+ // TODO(hclam): Rename this to ConvertToVideoFrame().
+ virtual void UploadToVideoFrame(void* buffer, scoped_refptr<VideoFrame> frame,
+ Task* task) = 0;
+
// Destroy this context asynchronously. When the operation is done |task|
// is called.
//
// ReleaseVideoFrames() need to be called with all the video frames allocated
- // before making this call.
+ // before making this call.
virtual void Destroy(Task* task) = 0;
};
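To make the new UploadToVideoFrame() contract above concrete, here is a self-contained sketch of the conversion step it describes: the engine hands the context a device-specific buffer, the context copies it into a common storage format, and the completion task runs afterwards. DemoDecodeContext and NativeDecodeBuffer are hypothetical stand-ins, not Chromium types; a GL-backed context (e.g. the FakeGlVideoDecodeContext this CL introduces) would presumably target GL textures rather than system memory.

// Illustrative sketch only, assuming simplified stand-in types.
#include <cstdio>
#include <functional>
#include <vector>

// Stand-ins for the real Task and media::VideoFrame so the sketch compiles alone.
using Task = std::function<void()>;
struct VideoFrame {
  int width = 0;
  int height = 0;
  std::vector<unsigned char> rgba;   // common system-memory storage
};

// Hypothetical device-specific buffer produced by a decode engine.
struct NativeDecodeBuffer {
  const unsigned char* pixels;
  int width;
  int height;
};

// Minimal context: converts the device-specific buffer to the common format
// and then runs the completion task, mirroring |task| in the interface above.
class DemoDecodeContext {
 public:
  void UploadToVideoFrame(void* buffer, VideoFrame* frame, Task task) {
    const NativeDecodeBuffer* native = static_cast<NativeDecodeBuffer*>(buffer);
    frame->width = native->width;
    frame->height = native->height;
    frame->rgba.assign(native->pixels,
                       native->pixels + native->width * native->height * 4);
    task();  // signal completion to the caller
  }
};

int main() {
  unsigned char pixels[2 * 2 * 4] = {0};
  NativeDecodeBuffer native = {pixels, 2, 2};
  VideoFrame frame;
  DemoDecodeContext context;
  context.UploadToVideoFrame(&native, &frame,
                             [] { std::printf("upload complete\n"); });
  std::printf("frame %dx%d, %zu bytes\n", frame.width, frame.height,
              frame.rgba.size());
  return 0;
}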
diff --git a/media/video/video_decode_engine.h b/media/video/video_decode_engine.h
index 8736890..f9381a2 100644
--- a/media/video/video_decode_engine.h
+++ b/media/video/video_decode_engine.h
@@ -12,6 +12,7 @@
namespace media {
class Buffer;
+class VideoDecodeContext;
enum VideoCodec {
kCodecH264,
@@ -116,13 +117,19 @@ class VideoDecodeEngine {
virtual ~VideoDecodeEngine() {}
- // Initialized the engine with specified configuration. |message_loop| could
- // be NULL if every operation is synchronous. Engine should call the
- // EventHandler::OnInitializeDone() no matter finished successfully or not.
- // TODO(jiesun): remove message_loop and create thread inside openmax engine?
- // or create thread in GpuVideoDecoder and pass message loop here?
+ // Initialize the engine with the specified configuration.
+ //
+ // |context| is used for allocation of VideoFrames.
+ // It is important that |context| is called only on |message_loop|.
+ //
+ // TODO(hclam): Currently refactoring code to use VideoDecodeContext so
+ // |context| may be NULL in some cases.
+ //
+ // Engine should call EventHandler::OnInitializeDone() whether the
+ // initialization operation finished successfully or not.
virtual void Initialize(MessageLoop* message_loop,
EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) = 0;
// Uninitialize the engine. Engine should destroy all resources and call