summaryrefslogtreecommitdiffstats
path: root/media
diff options
context:
space:
mode:
authorajwong@chromium.org <ajwong@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2009-12-22 21:27:52 +0000
committerajwong@chromium.org <ajwong@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2009-12-22 21:27:52 +0000
commit55424ebf0473d77fd3acdfd3d4d1fab52e5ecdf3 (patch)
treecc33197cf9a838dde3d9efbd62229c74e8f67320 /media
parent6307cdcd4c47a404c48415b5ac2303f2590d00c3 (diff)
downloadchromium_src-55424ebf0473d77fd3acdfd3d4d1fab52e5ecdf3.zip
chromium_src-55424ebf0473d77fd3acdfd3d4d1fab52e5ecdf3.tar.gz
chromium_src-55424ebf0473d77fd3acdfd3d4d1fab52e5ecdf3.tar.bz2
Implementation of OmxVideoDecodeEngine.
Also moves FFmpegVideoDecodeEngine, FFmpegVideoDecoder, OmxVideoDecoder, and VideoDecoderImpl into their own files. Refactors FFmpegDemuxerTest to be less of a characterization test, and to hopefully be less fragile. Creates a set of utilities for handling Callbacks versus Tasks, and resource management related to Callbacks. Re-enables the annexb filters for the chrome build of FFmpeg. Added a BitstreamConverter class that abstracts the bitstream filter code. Cleans up a few gyp mistakes with flag exporting. Review URL: http://codereview.chromium.org/492023 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@35171 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r--media/base/callback.h73
-rw-r--r--media/base/media_switches.cc2
-rw-r--r--media/base/media_switches.h2
-rw-r--r--media/base/mock_ffmpeg.h12
-rw-r--r--media/filters/bitstream_converter.cc68
-rw-r--r--media/filters/bitstream_converter.h82
-rw-r--r--media/filters/bitstream_converter_unittest.cc169
-rw-r--r--media/filters/ffmpeg_common.h12
-rw-r--r--media/filters/ffmpeg_demuxer.cc45
-rw-r--r--media/filters/ffmpeg_demuxer.h6
-rw-r--r--media/filters/ffmpeg_demuxer_unittest.cc191
-rw-r--r--media/filters/ffmpeg_video_decode_engine.cc125
-rw-r--r--media/filters/ffmpeg_video_decode_engine.h49
-rw-r--r--media/filters/ffmpeg_video_decode_engine_unittest.cc1
-rw-r--r--media/filters/ffmpeg_video_decoder.cc421
-rw-r--r--media/filters/ffmpeg_video_decoder.h126
-rw-r--r--media/filters/omx_video_decode_engine.cc166
-rw-r--r--media/filters/omx_video_decode_engine.h95
-rw-r--r--media/filters/omx_video_decoder.cc59
-rw-r--r--media/filters/omx_video_decoder.h37
-rw-r--r--media/filters/video_decode_engine.h3
-rw-r--r--media/filters/video_decoder_impl.cc317
-rw-r--r--media/filters/video_decoder_impl.h114
-rw-r--r--media/filters/video_decoder_impl_unittest.cc (renamed from media/filters/ffmpeg_video_decoder_unittest.cc)85
-rw-r--r--media/media.gyp24
-rw-r--r--media/omx/omx_codec.cc8
-rw-r--r--media/tools/player_x11/player_x11.cc10
27 files changed, 1602 insertions, 700 deletions
diff --git a/media/base/callback.h b/media/base/callback.h
index 553c842..d4b56fc5 100644
--- a/media/base/callback.h
+++ b/media/base/callback.h
@@ -3,13 +3,24 @@
// LICENSE file.
// Some basic utilities for aiding in the management of Tasks and Callbacks.
+//
// AutoTaskRunner, and its brother AutoCallbackRunner are the scoped_ptr
// equivalents for callbacks. They are useful for ensuring a callback is
// executed and deleted in the face of multiple return points in a function.
+//
+// TaskToCallbackAdapter converts a Task to a Callback0::Type since the two type
+// hierarchies are strangely separate.
+//
+// CleanupCallback wraps another Callback and provides the ability to register
+// objects for deletion as well as cleanup tasks that will be run on the
+// callback's destruction. The deletion and cleanup tasks will be run on
+// whatever thread the CleanupCallback is destroyed in.
#ifndef MEDIA_BASE_CALLBACK_
#define MEDIA_BASE_CALLBACK_
+#include <vector>
+
#include "base/scoped_ptr.h"
#include "base/task.h"
@@ -57,6 +68,68 @@ class AutoCallbackRunner {
DISALLOW_COPY_AND_ASSIGN(AutoCallbackRunner);
};
+class TaskToCallbackAdapter : public Callback0::Type {
+ public:
+ static Callback0::Type* NewCallback(Task* task) {
+ return new TaskToCallbackAdapter(task);
+ }
+
+ virtual ~TaskToCallbackAdapter() {}
+
+ virtual void RunWithParams(const Tuple0& params) { task_->Run(); }
+
+ private:
+ TaskToCallbackAdapter(Task* task) : task_(task) {}
+
+ scoped_ptr<Task> task_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskToCallbackAdapter);
+};
+
+template <typename CallbackType>
+class CleanupCallback : public CallbackType {
+ public:
+ explicit CleanupCallback(CallbackType* callback) : callback_(callback) {}
+
+ virtual ~CleanupCallback() {
+ for (size_t i = 0; i < run_when_done_.size(); i++) {
+ run_when_done_[i]->Run();
+ delete run_when_done_[i];
+ }
+ }
+
+ virtual void RunWithParams(const typename CallbackType::TupleType& params) {
+ callback_->RunWithParams(params);
+ }
+
+ template <typename T>
+ void DeleteWhenDone(T* ptr) {
+ class Deleter : public Task {
+ public:
+ Deleter(T* p) : ptr_(p) {}
+
+ virtual void Run() {
+ delete ptr_;
+ }
+
+ private:
+ T* ptr_;
+ };
+
+ RunWhenDone(new Deleter(ptr));
+ }
+
+ void RunWhenDone(Task* ptr) {
+ run_when_done_.push_back(ptr);
+ }
+
+ private:
+ scoped_ptr<CallbackType> callback_;
+ std::vector<Task*> run_when_done_;
+
+ DISALLOW_COPY_AND_ASSIGN(CleanupCallback);
+};
+
} // namespace media
#endif // MEDIA_BASE_CALLBACK_
diff --git a/media/base/media_switches.cc b/media/base/media_switches.cc
index f40ce26..1295eb2 100644
--- a/media/base/media_switches.cc
+++ b/media/base/media_switches.cc
@@ -11,4 +11,6 @@ namespace switches {
const char kAlsaDevice[] = "alsa-device";
#endif
+const char kVideoH264Annexb[] = "video-h264-annexb";
+
} // namespace switches
diff --git a/media/base/media_switches.h b/media/base/media_switches.h
index 4afa520..86773bd 100644
--- a/media/base/media_switches.h
+++ b/media/base/media_switches.h
@@ -15,6 +15,8 @@ namespace switches {
extern const char kAlsaDevice[];
#endif
+extern const char kVideoH264Annexb[];
+
} // namespace switches
#endif // MEDIA_BASE_SWITCHES_H_
diff --git a/media/base/mock_ffmpeg.h b/media/base/mock_ffmpeg.h
index cfa9a1e..4a7de2b 100644
--- a/media/base/mock_ffmpeg.h
+++ b/media/base/mock_ffmpeg.h
@@ -102,6 +102,18 @@ ACTION_P3(CreatePacket, stream_index, data, size) {
return 0;
}
+// Used for simulating av_read_frame().
+ACTION_P3(CreatePacketNoCount, stream_index, data, size) {
+ // Confirm we're dealing with AVPacket so we can safely const_cast<>.
+ ::testing::StaticAssertTypeEq<AVPacket*, arg1_type>();
+ memset(arg1, 0, sizeof(*arg1));
+ arg1->stream_index = stream_index;
+ arg1->data = const_cast<uint8*>(data);
+ arg1->size = size;
+
+ return 0;
+}
+
// Used for simulating av_new_packet().
ACTION(NewPacket) {
::testing::StaticAssertTypeEq<AVPacket*, arg0_type>();
diff --git a/media/filters/bitstream_converter.cc b/media/filters/bitstream_converter.cc
new file mode 100644
index 0000000..c8ec355
--- /dev/null
+++ b/media/filters/bitstream_converter.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/bitstream_converter.h"
+
+#include "media/filters/ffmpeg_common.h"
+
+namespace media {
+
+FFmpegBitstreamConverter::FFmpegBitstreamConverter(
+ const std::string& filter_name,
+ AVCodecContext* stream_context)
+ : filter_name_(filter_name),
+ stream_filter_(NULL),
+ stream_context_(stream_context) {
+ CHECK(stream_context_);
+}
+
+FFmpegBitstreamConverter::~FFmpegBitstreamConverter() {
+ if (stream_filter_) {
+ av_bitstream_filter_close(stream_filter_);
+ stream_filter_ = NULL;
+ }
+}
+
+bool FFmpegBitstreamConverter::Initialize() {
+ stream_filter_ = av_bitstream_filter_init(filter_name_.c_str());
+ return stream_filter_ != NULL;
+}
+
+bool FFmpegBitstreamConverter::ConvertPacket(AVPacket* packet) {
+ CHECK(packet);
+
+ if (!stream_filter_) {
+ LOG(ERROR) << "Converter improperly initialized.";
+ return false;
+ }
+
+ uint8_t* converted_data = NULL;
+ int converted_size = 0;
+
+ if (av_bitstream_filter_filter(stream_filter_, stream_context_, NULL,
+ &converted_data, &converted_size,
+ packet->data, packet->size,
+ packet->flags & PKT_FLAG_KEY) < 0) {
+ return false;
+ }
+
+ // av_bitstream_filter_filter() does not always allocate a new packet.
+ // If a new packet was allocated, then we need to modify the
+ // |packet| to point to the new data, releasing its current payload
+ // if it has the authoritative reference.
+ //
+ // TODO(ajwong): We're relying on the implementation behavior of
+ // av_free_packet() and the meaning of the |destruct| field in
+ // AVPacket. Try to find a cleaner way to do this.
+ if (converted_data != packet->data) {
+ av_free_packet(packet);
+ packet->data = converted_data;
+ packet->size = converted_size;
+ packet->destruct = av_destruct_packet;
+ }
+
+ return true;
+}
+
+} // namespace media
diff --git a/media/filters/bitstream_converter.h b/media/filters/bitstream_converter.h
new file mode 100644
index 0000000..d52a4b2
--- /dev/null
+++ b/media/filters/bitstream_converter.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Interface and some concrete classes for applying various transforms
+// to AVPackets. FFmpegBitstreamConverter, in particular, can be used
+// to apply FFmpeg bitstream filters to the incoming AVPacket to transcode
+// the packet format.
+
+#ifndef MEDIA_FILTERS_BITSTREAM_CONVERTER_H_
+#define MEDIA_FILTERS_BITSTREAM_CONVERTER_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+#include "testing/gtest/include/gtest/gtest_prod.h"
+
+// FFmpeg types.
+struct AVBitStreamFilterContext;
+struct AVCodecContext;
+struct AVPacket;
+
+namespace media {
+
+class BitstreamConverter {
+ public:
+ BitstreamConverter() {}
+ virtual ~BitstreamConverter() {}
+
+ // Attempts to convert the AVPacket from one format to another, based on the
+ // specific type of BitstreamConverter that was instantiated.
+ virtual bool Initialize() = 0;
+ virtual bool ConvertPacket(AVPacket* packet) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BitstreamConverter);
+};
+
+class IdentityBitstreamConverter : public BitstreamConverter {
+ public:
+ IdentityBitstreamConverter() {}
+ virtual ~IdentityBitstreamConverter() {}
+
+ virtual bool Initialize() { return true; }
+ virtual bool ConvertPacket(AVPacket* packet) { return true; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(IdentityBitstreamConverter);
+};
+
+class FFmpegBitstreamConverter : public BitstreamConverter {
+ public:
+ // Creates FFmpegBitstreamConverter based on the FFmpeg bitstream filter
+ // corresponding to |filter_name|.
+ //
+ // The |stream_context| will be used during conversion and should be the
+ // AVCodecContext for the stream sourcing these packets. A reference to
+ // |stream_context| is retained, so it must outlive this class.
+ FFmpegBitstreamConverter(const std::string& filter_name,
+ AVCodecContext* stream_context);
+ virtual ~FFmpegBitstreamConverter();
+
+ virtual bool Initialize();
+ virtual bool ConvertPacket(AVPacket* packet);
+
+ private:
+ FRIEND_TEST(BitstreamConverterTest, ConvertPacket_FailedFilter);
+ FRIEND_TEST(BitstreamConverterTest, ConvertPacket_Success);
+ FRIEND_TEST(BitstreamConverterTest, ConvertPacket_SuccessInPlace);
+
+ std::string filter_name_;
+ AVBitStreamFilterContext* stream_filter_;
+ AVCodecContext* stream_context_;
+
+ DISALLOW_COPY_AND_ASSIGN(FFmpegBitstreamConverter);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_BITSTREAM_CONVERTER_H_
diff --git a/media/filters/bitstream_converter_unittest.cc b/media/filters/bitstream_converter_unittest.cc
new file mode 100644
index 0000000..007becb
--- /dev/null
+++ b/media/filters/bitstream_converter_unittest.cc
@@ -0,0 +1,169 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <deque>
+
+#include "media/base/mock_ffmpeg.h"
+#include "media/filters/bitstream_converter.h"
+#include "media/filters/ffmpeg_common.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::DoAll;
+using ::testing::Mock;
+using ::testing::Return;
+using ::testing::ReturnNull;
+using ::testing::SetArgumentPointee;
+using ::testing::StrEq;
+using ::testing::StrictMock;
+using ::testing::_;
+
+namespace media {
+
+class BitstreamConverterTest : public testing::Test {
+ protected:
+ BitstreamConverterTest() {
+ // Initialize MockFFmpeg.
+ MockFFmpeg::set(&mock_ffmpeg_);
+
+ memset(&test_stream_context_, 0, sizeof(test_stream_context_));
+ memset(&test_filter_, 0, sizeof(test_filter_));
+ memset(&test_packet_, 0, sizeof(test_packet_));
+ test_packet_.data = kData1;
+ test_packet_.size = kTestSize1;
+ }
+
+ virtual ~BitstreamConverterTest() {
+ // Reset MockFFmpeg.
+ MockFFmpeg::set(NULL);
+ }
+
+ AVCodecContext test_stream_context_;
+ AVBitStreamFilterContext test_filter_;
+ AVPacket test_packet_;
+
+ StrictMock<MockFFmpeg> mock_ffmpeg_;
+
+ static const char kTestFilterName[];
+ static uint8_t kData1[];
+ static uint8_t kData2[];
+ static const int kTestSize1;
+ static const int kTestSize2;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BitstreamConverterTest);
+};
+
+const char BitstreamConverterTest::kTestFilterName[] = "test_filter";
+uint8_t BitstreamConverterTest::kData1[] = { 1 };
+uint8_t BitstreamConverterTest::kData2[] = { 2 };
+const int BitstreamConverterTest::kTestSize1 = 1;
+const int BitstreamConverterTest::kTestSize2 = 2;
+
+TEST_F(BitstreamConverterTest, Initialize) {
+ FFmpegBitstreamConverter converter(kTestFilterName, &test_stream_context_);
+
+ // Test Initialize returns false on a bad initialization, and cleanup is not
+ // done.
+ EXPECT_CALL(mock_ffmpeg_, AVBitstreamFilterInit(StrEq(kTestFilterName)))
+ .WillOnce(ReturnNull());
+ EXPECT_FALSE(converter.Initialize());
+
+ EXPECT_TRUE(Mock::VerifyAndClearExpectations(&mock_ffmpeg_));
+
+ // Test Initialize returns true on successful initialization, and cleanup is
+ // done. The cleanup will be activated when the converter object goes out of
+ // scope.
+ EXPECT_CALL(mock_ffmpeg_, AVBitstreamFilterInit(StrEq(kTestFilterName)))
+ .WillOnce(Return(&test_filter_));
+ EXPECT_CALL(mock_ffmpeg_, AVBitstreamFilterClose(&test_filter_));
+ EXPECT_TRUE(converter.Initialize());
+}
+
+TEST_F(BitstreamConverterTest, ConvertPacket_NotInitialized) {
+ FFmpegBitstreamConverter converter(kTestFilterName, &test_stream_context_);
+
+ EXPECT_FALSE(converter.ConvertPacket(&test_packet_));
+}
+
+TEST_F(BitstreamConverterTest, ConvertPacket_FailedFilter) {
+ FFmpegBitstreamConverter converter(kTestFilterName, &test_stream_context_);
+
+ // Inject mock filter instance.
+ converter.stream_filter_ = &test_filter_;
+
+ // Simulate a successful filter call, that allocates a new data buffer.
+ EXPECT_CALL(mock_ffmpeg_,
+ AVBitstreamFilterFilter(&test_filter_, &test_stream_context_,
+ NULL, _, _,
+ test_packet_.data, test_packet_.size, _))
+ .WillOnce(Return(AVERROR_UNKNOWN));
+
+ EXPECT_FALSE(converter.ConvertPacket(&test_packet_));
+
+ // Uninject mock filter instance to avoid cleanup code on destruction of
+ // converter.
+ converter.stream_filter_ = NULL;
+}
+
+TEST_F(BitstreamConverterTest, ConvertPacket_Success) {
+ FFmpegBitstreamConverter converter(kTestFilterName, &test_stream_context_);
+
+ // Inject mock filter instance.
+ converter.stream_filter_ = &test_filter_;
+
+ // Ensure our packet doesn't already have a destructor.
+ ASSERT_TRUE(test_packet_.destruct == NULL);
+
+ // Simulate a successful filter call, that allocates a new data buffer.
+ EXPECT_CALL(mock_ffmpeg_,
+ AVBitstreamFilterFilter(&test_filter_, &test_stream_context_,
+ NULL, _, _,
+ test_packet_.data, test_packet_.size, _))
+ .WillOnce(DoAll(SetArgumentPointee<3>(&kData2[0]),
+ SetArgumentPointee<4>(kTestSize2),
+ Return(0)));
+ EXPECT_CALL(mock_ffmpeg_, AVFreePacket(&test_packet_));
+
+ EXPECT_TRUE(converter.ConvertPacket(&test_packet_));
+ EXPECT_EQ(kData2, test_packet_.data);
+ EXPECT_EQ(kTestSize2, test_packet_.size);
+ EXPECT_TRUE(test_packet_.destruct != NULL);
+
+ // Uninject mock filter instance to avoid cleanup code on destruction of
+ // converter.
+ converter.stream_filter_ = NULL;
+}
+
+TEST_F(BitstreamConverterTest, ConvertPacket_SuccessInPlace) {
+ FFmpegBitstreamConverter converter(kTestFilterName, &test_stream_context_);
+
+ // Inject mock filter instance.
+ converter.stream_filter_ = &test_filter_;
+
+ // Ensure our packet is in a sane start state.
+ ASSERT_TRUE(test_packet_.destruct == NULL);
+ ASSERT_EQ(kData1, test_packet_.data);
+ ASSERT_EQ(kTestSize1, test_packet_.size);
+
+ // Simulate a successful filter call, that reuses the input buffer. We should
+ // not free the packet here or alter the packet's destructor.
+ EXPECT_CALL(mock_ffmpeg_,
+ AVBitstreamFilterFilter(&test_filter_, &test_stream_context_,
+ NULL, _, _,
+ test_packet_.data, test_packet_.size, _))
+ .WillOnce(DoAll(SetArgumentPointee<3>(test_packet_.data),
+ SetArgumentPointee<4>(test_packet_.size),
+ Return(0)));
+
+ EXPECT_TRUE(converter.ConvertPacket(&test_packet_));
+ EXPECT_EQ(kData1, test_packet_.data);
+ EXPECT_EQ(kTestSize1, test_packet_.size);
+ EXPECT_TRUE(test_packet_.destruct == NULL);
+
+ // Uninject mock filter instance to avoid cleanup code on destruction of
+ // converter.
+ converter.stream_filter_ = NULL;
+}
+
+} // namespace media
diff --git a/media/filters/ffmpeg_common.h b/media/filters/ffmpeg_common.h
index 1e792f4..19c8fcf 100644
--- a/media/filters/ffmpeg_common.h
+++ b/media/filters/ffmpeg_common.h
@@ -56,6 +56,18 @@ class ScopedPtrAVFree {
}
};
+// This assumes that the AVPacket being captured was allocated outside of
+// FFmpeg via the new operator. Do not use this with AVPacket instances that
+// are allocated via malloc() or av_malloc().
+class ScopedPtrAVFreePacket {
+ public:
+ inline void operator()(void* x) const {
+ AVPacket* packet = static_cast<AVPacket*>(x);
+ av_free_packet(packet);
+ delete packet;
+ }
+};
+
// FFmpeg MIME types.
namespace mime_type {
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
index 3eab9f6..6e1c1fb 100644
--- a/media/filters/ffmpeg_demuxer.cc
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -2,12 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/command_line.h"
#include "base/scoped_ptr.h"
#include "base/stl_util-inl.h"
#include "base/string_util.h"
#include "base/time.h"
#include "media/base/filter_host.h"
+#include "media/base/media_switches.h"
#include "media/ffmpeg/ffmpeg_util.h"
+#include "media/filters/bitstream_converter.h"
#include "media/filters/ffmpeg_common.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/ffmpeg_glue.h"
@@ -27,7 +30,6 @@ class AVPacketBuffer : public Buffer {
}
virtual ~AVPacketBuffer() {
- av_free_packet(packet_.get());
}
// Buffer implementation.
@@ -40,7 +42,7 @@ class AVPacketBuffer : public Buffer {
}
private:
- scoped_ptr<AVPacket> packet_;
+ scoped_ptr_malloc<AVPacket, ScopedPtrAVFreePacket> packet_;
DISALLOW_COPY_AND_ASSIGN(AVPacketBuffer);
};
@@ -472,7 +474,7 @@ void FFmpegDemuxer::DemuxTask() {
}
// Allocate and read an AVPacket from the media.
- scoped_ptr<AVPacket> packet(new AVPacket());
+ scoped_ptr_malloc<AVPacket, ScopedPtrAVFreePacket> packet(new AVPacket());
int result = av_read_frame(format_context_, packet.get());
if (result < 0) {
// If we have reached the end of stream, tell the downstream filters about
@@ -489,17 +491,38 @@ void FFmpegDemuxer::DemuxTask() {
DCHECK_LT(packet->stream_index, static_cast<int>(packet_streams_.size()));
FFmpegDemuxerStream* demuxer_stream = packet_streams_[packet->stream_index];
if (demuxer_stream) {
- // If a packet is returned by FFmpeg's av_parser_parse2()
- // the packet will reference an inner memory of FFmpeg.
- // In this case, the packet's "destruct" member is NULL,
- // and it MUST be duplicated. Fixes issue with MP3.
- av_dup_packet(packet.get());
+ using switches::kVideoH264Annexb;
+ AVCodecContext* stream_context =
+ format_context_->streams[packet->stream_index]->codec;
+ if (stream_context->codec_id == CODEC_ID_H264 &&
+ CommandLine::ForCurrentProcess()->HasSwitch(kVideoH264Annexb)) {
+ // TODO(ajwong): Unittest this branch of the if statement.
+ // Also, move this code into the FFmpegDemuxerStream, so that the decoder
+ // can enable a filter in the stream as needed.
+ if (!bitstream_converter_.get()) {
+ bitstream_converter_.reset(
+ new FFmpegBitstreamConverter("h264_mp4toannexb", stream_context));
+ CHECK(bitstream_converter_->Initialize());
+ }
+
+ if (!bitstream_converter_->ConvertPacket(packet.get())) {
+ LOG(ERROR) << "Packet dropped: Format converstion failed.";
+ packet.reset();
+ }
+ }
// Queue the packet with the appropriate stream. The stream takes
// ownership of the AVPacket.
- current_timestamp_ = demuxer_stream->EnqueuePacket(packet.release());
- } else {
- av_free_packet(packet.get());
+ if (packet.get()) {
+ // If a packet is returned by FFmpeg's av_parser_parse2()
+ // the packet will reference an inner memory of FFmpeg.
+ // In this case, the packet's "destruct" member is NULL,
+ // and it MUST be duplicated. This fixes issue with MP3 and possibly
+ // other codecs. It is safe to call this function even if the packet does
+ // not refer to inner memory from FFmpeg.
+ av_dup_packet(packet.get());
+ current_timestamp_ = demuxer_stream->EnqueuePacket(packet.release());
+ }
}
// Create a loop by posting another task. This allows seek and message loop
diff --git a/media/filters/ffmpeg_demuxer.h b/media/filters/ffmpeg_demuxer.h
index f1b13a0..97fbfc8 100644
--- a/media/filters/ffmpeg_demuxer.h
+++ b/media/filters/ffmpeg_demuxer.h
@@ -35,8 +35,6 @@
#include "testing/gtest/include/gtest/gtest_prod.h"
// FFmpeg forward declarations.
-struct AVCodecContext;
-struct AVBitStreamFilterContext;
struct AVFormatContext;
struct AVPacket;
struct AVRational;
@@ -44,6 +42,7 @@ struct AVStream;
namespace media {
+class BitstreamConverter;
class FFmpegDemuxer;
// Forward declaration for scoped_ptr_malloc.
@@ -194,6 +193,9 @@ class FFmpegDemuxer : public Demuxer,
// Latest timestamp read on the demuxer thread.
base::TimeDelta current_timestamp_;
+ // Used to translate bitstream formats. Lazily allocated.
+ scoped_ptr<BitstreamConverter> bitstream_converter_;
+
// Two vectors of streams:
// - |streams_| is indexed for the Demuxer interface GetStream(), which only
// contains supported streams and no NULL entries.
diff --git a/media/filters/ffmpeg_demuxer_unittest.cc b/media/filters/ffmpeg_demuxer_unittest.cc
index 6a233b6..4fa0647 100644
--- a/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/media/filters/ffmpeg_demuxer_unittest.cc
@@ -14,7 +14,7 @@
#include "media/filters/ffmpeg_demuxer.h"
#include "testing/gtest/include/gtest/gtest.h"
-using ::testing::_;
+using ::testing::AnyNumber;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
@@ -23,6 +23,7 @@ using ::testing::Return;
using ::testing::SetArgumentPointee;
using ::testing::StrictMock;
using ::testing::WithArgs;
+using ::testing::_;
namespace media {
@@ -288,109 +289,120 @@ TEST_F(FFmpegDemuxerTest, Initialize_Successful) {
EXPECT_EQ(&streams_[AV_STREAM_AUDIO], av_stream_provider->GetAVStream());
}
-TEST_F(FFmpegDemuxerTest, Read) {
- // We're testing the following:
- //
- // 1) The demuxer immediately frees packets it doesn't care about and keeps
- // reading until it finds a packet it cares about.
- // 2) The demuxer doesn't free packets that we read from it.
- // 3) On end of stream, the demuxer queues end of stream packets on every
- // stream.
- //
- // Since we can't test which packets are being freed, we use check points to
- // infer that the correct packets have been freed.
+TEST_F(FFmpegDemuxerTest, Read_DiscardUninteresting) {
+ // We test that on a successful audio packet read, that the packet is
+ // duplicated (FFmpeg memory management safety), and a copy of it ends up in
+ // the DemuxerStream.
{
SCOPED_TRACE("");
InitializeDemuxer();
}
- // Get our streams.
- scoped_refptr<DemuxerStream> video = demuxer_->GetStream(DS_STREAM_VIDEO);
- scoped_refptr<DemuxerStream> audio = demuxer_->GetStream(DS_STREAM_AUDIO);
- ASSERT_TRUE(video);
- ASSERT_TRUE(audio);
-
- // Expect all calls in sequence.
- InSequence s;
+ // Ignore all AVFreePacket() calls. We check this elsewhere.
+ EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).Times(AnyNumber());
// The demuxer will read a data packet which will get immediately freed,
- // followed by reading an audio packet...
+ // followed by a read error to end the reading.
+ InSequence s;
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_DATA, kNullData, 0));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
+ .WillOnce(CreatePacketNoCount(AV_STREAM_DATA, kNullData, 0));
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_AUDIO, kAudioData, kDataSize));
- EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
- .WillOnce(Return(0));
+ .WillOnce(Return(AVERROR_IO));
- // ...then we'll free it with some sanity checkpoints...
- EXPECT_CALL(*MockFFmpeg::get(), CheckPoint(1));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
- EXPECT_CALL(*MockFFmpeg::get(), CheckPoint(2));
+ // Attempt a read from the audio stream and run the message loop until done.
+ scoped_refptr<DemuxerStream> audio = demuxer_->GetStream(DS_STREAM_AUDIO);
+ scoped_refptr<DemuxerStreamReader> reader(new DemuxerStreamReader());
+ reader->Read(audio);
+ message_loop_.RunAllPending();
- // ...then we'll read a video packet...
- EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_VIDEO, kVideoData, kDataSize));
- EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
- .WillOnce(Return(0));
+ EXPECT_TRUE(reader->called());
+ ASSERT_TRUE(reader->buffer());
+ EXPECT_TRUE(reader->buffer()->IsEndOfStream());
+}
- // ...then we'll free it with some sanity checkpoints...
- EXPECT_CALL(*MockFFmpeg::get(), CheckPoint(3));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
- EXPECT_CALL(*MockFFmpeg::get(), CheckPoint(4));
+TEST_F(FFmpegDemuxerTest, Read_Audio) {
+ // We test that on a successful audio packet read, that the packet is
+ // duplicated (FFmpeg memory management safety), and a copy of it ends up in
+ // the DemuxerStream.
+ {
+ SCOPED_TRACE("");
+ InitializeDemuxer();
+ }
- // ...then we'll simulate end of stream. Note that a packet isn't "created"
- // in this situation so there is no outstanding packet. However an end of
- // stream packet is created for each stream, which means av_free_packet()
- // will still be called twice.
+ // Ignore all AVFreePacket() calls. We check this via valgrind.
+ EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).Times(AnyNumber());
+
+ // The demuxer will read a data packet which will get immediately freed,
+ // followed by reading an audio packet...
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(Return(AVERROR_IO));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_));
- EXPECT_CALL(*MockFFmpeg::get(), CheckPoint(5));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_));
- EXPECT_CALL(*MockFFmpeg::get(), CheckPoint(6));
+ .WillOnce(CreatePacketNoCount(AV_STREAM_AUDIO, kAudioData, kDataSize));
+ EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
+ .WillOnce(Return(0));
// Attempt a read from the audio stream and run the message loop until done.
+ scoped_refptr<DemuxerStream> audio = demuxer_->GetStream(DS_STREAM_AUDIO);
scoped_refptr<DemuxerStreamReader> reader(new DemuxerStreamReader());
reader->Read(audio);
message_loop_.RunAllPending();
+
EXPECT_TRUE(reader->called());
ASSERT_TRUE(reader->buffer());
EXPECT_FALSE(reader->buffer()->IsDiscontinuous());
ASSERT_EQ(kDataSize, reader->buffer()->GetDataSize());
EXPECT_EQ(0, memcmp(kAudioData, reader->buffer()->GetData(),
reader->buffer()->GetDataSize()));
+}
- // We shouldn't have freed the audio packet yet.
- MockFFmpeg::get()->CheckPoint(1);
+TEST_F(FFmpegDemuxerTest, Read_Video) {
+ // We test that on a successful video packet read, that the packet is
+ // duplicated (FFmpeg memory management safety), and a copy of it ends up in
+ // the DemuxerStream.
+ {
+ SCOPED_TRACE("");
+ InitializeDemuxer();
+ }
- // Manually release the last reference to the buffer.
- reader->Reset();
- message_loop_.RunAllPending();
- MockFFmpeg::get()->CheckPoint(2);
+ // Ignore all AVFreePacket() calls. We check this via valgrind.
+ EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).Times(AnyNumber());
+
+ // Simulate a successful frame read.
+ EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
+ .WillOnce(CreatePacketNoCount(AV_STREAM_VIDEO, kVideoData, kDataSize));
+ EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
+ .WillOnce(Return(0));
// Attempt a read from the video stream and run the message loop until done.
+ scoped_refptr<DemuxerStream> video = demuxer_->GetStream(DS_STREAM_VIDEO);
+ scoped_refptr<DemuxerStreamReader> reader(new DemuxerStreamReader());
reader->Read(video);
message_loop_.RunAllPending();
+
EXPECT_TRUE(reader->called());
ASSERT_TRUE(reader->buffer());
EXPECT_FALSE(reader->buffer()->IsDiscontinuous());
ASSERT_EQ(kDataSize, reader->buffer()->GetDataSize());
EXPECT_EQ(0, memcmp(kVideoData, reader->buffer()->GetData(),
reader->buffer()->GetDataSize()));
+}
- // We shouldn't have freed the video packet yet.
- MockFFmpeg::get()->CheckPoint(3);
+TEST_F(FFmpegDemuxerTest, Read_EndOfStream) {
+ // On end of stream, a new, empty, AVPackets are created without any data for
+ // each stream and enqueued into the Buffer stream. Verify that these are
+ // indeed inserted.
+ {
+ SCOPED_TRACE("");
+ InitializeDemuxer();
+ }
- // Manually release the last reference to the buffer and verify it was freed.
- reader->Reset();
- message_loop_.RunAllPending();
- MockFFmpeg::get()->CheckPoint(4);
+ // Ignore all AVFreePacket() calls. We check this via valgrind.
+ EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).Times(AnyNumber());
- // We should now expect an end of stream buffer in both the audio and video
- // streams.
+ EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
+ .WillOnce(Return(AVERROR_IO));
- // Attempt a read from the audio stream and run the message loop until done.
+ // We should now expect an end of stream buffer.
+ scoped_refptr<DemuxerStream> audio = demuxer_->GetStream(DS_STREAM_AUDIO);
+ scoped_refptr<DemuxerStreamReader> reader(new DemuxerStreamReader());
reader->Read(audio);
message_loop_.RunAllPending();
EXPECT_TRUE(reader->called());
@@ -398,25 +410,6 @@ TEST_F(FFmpegDemuxerTest, Read) {
EXPECT_TRUE(reader->buffer()->IsEndOfStream());
EXPECT_TRUE(reader->buffer()->GetData() == NULL);
EXPECT_EQ(0u, reader->buffer()->GetDataSize());
-
- // Manually release buffer, which should release any remaining AVPackets.
- reader->Reset();
- message_loop_.RunAllPending();
- MockFFmpeg::get()->CheckPoint(5);
-
- // Attempt a read from the audio stream and run the message loop until done.
- reader->Read(video);
- message_loop_.RunAllPending();
- EXPECT_TRUE(reader->called());
- ASSERT_TRUE(reader->buffer());
- EXPECT_TRUE(reader->buffer()->IsEndOfStream());
- EXPECT_TRUE(reader->buffer()->GetData() == NULL);
- EXPECT_EQ(0u, reader->buffer()->GetDataSize());
-
- // Manually release buffer, which should release any remaining AVPackets.
- reader->Reset();
- message_loop_.RunAllPending();
- MockFFmpeg::get()->CheckPoint(6);
}
TEST_F(FFmpegDemuxerTest, Seek) {
@@ -442,31 +435,29 @@ TEST_F(FFmpegDemuxerTest, Seek) {
const int64 kExpectedTimestamp = 1234;
const int64 kExpectedFlags = 0;
+ // Ignore all AVFreePacket() calls. We check this via valgrind.
+ EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).Times(AnyNumber());
+
// Expect all calls in sequence.
InSequence s;
// First we'll read a video packet that causes two audio packets to be queued
// inside FFmpegDemuxer...
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_AUDIO, kAudioData, kDataSize));
+ .WillOnce(CreatePacketNoCount(AV_STREAM_AUDIO, kAudioData, kDataSize));
EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
.WillOnce(Return(0));
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_AUDIO, kAudioData, kDataSize));
+ .WillOnce(CreatePacketNoCount(AV_STREAM_AUDIO, kAudioData, kDataSize));
EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
.WillOnce(Return(0));
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_VIDEO, kVideoData, kDataSize));
+ .WillOnce(CreatePacketNoCount(AV_STREAM_VIDEO, kVideoData, kDataSize));
EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
.WillOnce(Return(0));
- // ...then we'll release our video packet...
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
EXPECT_CALL(*MockFFmpeg::get(), CheckPoint(1));
- // ...then we'll seek, which should release the previously queued packets...
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
// ... then we'll call Seek() to get around the first seek hack...
//
@@ -488,27 +479,23 @@ TEST_F(FFmpegDemuxerTest, Seek) {
// ...followed by two audio packet reads we'll trigger...
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_AUDIO, kAudioData, kDataSize));
+ .WillOnce(CreatePacketNoCount(AV_STREAM_AUDIO, kAudioData, kDataSize));
EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
.WillOnce(Return(0));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_AUDIO, kAudioData, kDataSize));
+ .WillOnce(CreatePacketNoCount(AV_STREAM_AUDIO, kAudioData, kDataSize));
EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
.WillOnce(Return(0));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
// ...followed by two video packet reads...
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_VIDEO, kVideoData, kDataSize));
+ .WillOnce(CreatePacketNoCount(AV_STREAM_VIDEO, kVideoData, kDataSize));
EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
.WillOnce(Return(0));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_VIDEO, kVideoData, kDataSize));
+ .WillOnce(CreatePacketNoCount(AV_STREAM_VIDEO, kVideoData, kDataSize));
EXPECT_CALL(*MockFFmpeg::get(), AVDupPacket(_))
.WillOnce(Return(0));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
// ...and finally a sanity checkpoint to make sure everything was released.
EXPECT_CALL(*MockFFmpeg::get(), CheckPoint(3));
@@ -662,19 +649,19 @@ TEST_F(FFmpegDemuxerTest, DisableAudioStream) {
demuxer_->OnReceivedMessage(kMsgDisableAudio);
message_loop_.RunAllPending();
+ // Ignore all AVFreePacket() calls. We check this via valgrind.
+ EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).Times(AnyNumber());
+
// Expect all calls in sequence.
InSequence s;
// The demuxer will read an audio packet which will get immediately freed.
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
- .WillOnce(CreatePacket(AV_STREAM_AUDIO, kNullData, 0));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_)).WillOnce(FreePacket());
+ .WillOnce(CreatePacketNoCount(AV_STREAM_AUDIO, kNullData, 0));
// Then an end-of-stream packet is read.
EXPECT_CALL(*MockFFmpeg::get(), AVReadFrame(&format_context_, _))
.WillOnce(Return(AVERROR_IO));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_));
- EXPECT_CALL(*MockFFmpeg::get(), AVFreePacket(_));
// Get our streams.
scoped_refptr<DemuxerStream> video = demuxer_->GetStream(DS_STREAM_VIDEO);
diff --git a/media/filters/ffmpeg_video_decode_engine.cc b/media/filters/ffmpeg_video_decode_engine.cc
new file mode 100644
index 0000000..976e9ab
--- /dev/null
+++ b/media/filters/ffmpeg_video_decode_engine.cc
@@ -0,0 +1,125 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved. Use of this
+// source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+#include "media/filters/ffmpeg_video_decode_engine.h"
+
+#include "base/task.h"
+#include "media/base/callback.h"
+#include "media/base/video_frame_impl.h"
+#include "media/ffmpeg/ffmpeg_util.h"
+#include "media/filters/ffmpeg_common.h"
+#include "media/filters/ffmpeg_demuxer.h"
+
+namespace media {
+
+FFmpegVideoDecodeEngine::FFmpegVideoDecodeEngine()
+ : codec_context_(NULL),
+ state_(kCreated) {
+}
+
+FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() {
+}
+
+void FFmpegVideoDecodeEngine::Initialize(AVStream* stream, Task* done_cb) {
+ AutoTaskRunner done_runner(done_cb);
+
+ // Always try to use two threads for video decoding. There is little reason
+ // not to since current day CPUs tend to be multi-core and we measured
+ // performance benefits on older machines such as P4s with hyperthreading.
+ //
+ // Handling decoding on separate threads also frees up the pipeline thread to
+ // continue processing. Although it'd be nice to have the option of a single
+ // decoding thread, FFmpeg treats having one thread the same as having zero
+ // threads (i.e., avcodec_decode_video() will execute on the calling thread).
+ // Yet another reason for having two threads :)
+ //
+ // TODO(scherkus): some video codecs might not like avcodec_thread_init()
+ // being called on them... should attempt to find out which ones those are!
+ static const int kDecodeThreads = 2;
+
+ CHECK(state_ == kCreated);
+
+ codec_context_ = stream->codec;
+ codec_context_->flags2 |= CODEC_FLAG2_FAST; // Enable faster H264 decode.
+ // Enable motion vector search (potentially slow), strong deblocking filter
+ // for damaged macroblocks, and set our error detection sensitivity.
+ codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
+ codec_context_->error_recognition = FF_ER_CAREFUL;
+
+ // Serialize calls to avcodec_open().
+ AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
+ {
+ AutoLock auto_lock(FFmpegLock::get()->lock());
+ if (codec &&
+ avcodec_thread_init(codec_context_, kDecodeThreads) >= 0 &&
+ avcodec_open(codec_context_, codec) >= 0) {
+ state_ = kNormal;
+ } else {
+ state_ = kError;
+ }
+ }
+}
+
+// Decodes one frame of video with the given buffer.
+void FFmpegVideoDecodeEngine::DecodeFrame(const Buffer& buffer,
+ AVFrame* yuv_frame,
+ bool* got_frame,
+ Task* done_cb) {
+ AutoTaskRunner done_runner(done_cb);
+
+ // Create a packet for input data.
+ // Due to FFmpeg API changes we no longer have const read-only pointers.
+ AVPacket packet;
+ av_init_packet(&packet);
+ packet.data = const_cast<uint8*>(buffer.GetData());
+ packet.size = buffer.GetDataSize();
+
+ // We don't allocate AVFrame on the stack since different versions of FFmpeg
+ // may change the size of AVFrame, causing stack corruption. The solution is
+ // to let FFmpeg allocate the structure via avcodec_alloc_frame().
+ int frame_decoded = 0;
+ int result =
+ avcodec_decode_video2(codec_context_, yuv_frame, &frame_decoded, &packet);
+
+ // Log the problem if we can't decode a video frame and exit early.
+ if (result < 0) {
+ LOG(INFO) << "Error decoding a video frame with timestamp: "
+ << buffer.GetTimestamp().InMicroseconds() << " us"
+ << " , duration: "
+ << buffer.GetDuration().InMicroseconds() << " us"
+ << " , packet size: "
+ << buffer.GetDataSize() << " bytes";
+ *got_frame = false;
+ } else {
+ // If frame_decoded == 0, then no frame was produced.
+ *got_frame = frame_decoded != 0;
+ }
+}
+
+void FFmpegVideoDecodeEngine::Flush(Task* done_cb) {
+ AutoTaskRunner done_runner(done_cb);
+
+ avcodec_flush_buffers(codec_context_);
+}
+
+VideoSurface::Format FFmpegVideoDecodeEngine::GetSurfaceFormat() const {
+ // J (Motion JPEG) versions of YUV are full range 0..255.
+ // Regular (MPEG) YUV is 16..240.
+ // For now we will ignore the distinction and treat them the same.
+ switch (codec_context_->pix_fmt) {
+ case PIX_FMT_YUV420P:
+ case PIX_FMT_YUVJ420P:
+ return VideoSurface::YV12;
+ break;
+ case PIX_FMT_YUV422P:
+ case PIX_FMT_YUVJ422P:
+ return VideoSurface::YV16;
+ break;
+ default:
+ // TODO(scherkus): More formats here?
+ return VideoSurface::INVALID;
+ }
+}
+
+} // namespace media
diff --git a/media/filters/ffmpeg_video_decode_engine.h b/media/filters/ffmpeg_video_decode_engine.h
new file mode 100644
index 0000000..b8338ab
--- /dev/null
+++ b/media/filters/ffmpeg_video_decode_engine.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved. Use of this
+// source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+#ifndef MEDIA_FILTERS_FFMPEG_VIDEO_DECODE_ENGINE_H_
+#define MEDIA_FILTERS_FFMPEG_VIDEO_DECODE_ENGINE_H_
+
+#include "media/filters/video_decode_engine.h"
+
+// FFmpeg types.
+struct AVCodecContext;
+struct AVFrame;
+struct AVStream;
+
+namespace media {
+
+class InputBuffer;
+class OmxCodec;
+
+class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
+ public:
+ FFmpegVideoDecodeEngine();
+ virtual ~FFmpegVideoDecodeEngine();
+
+ // Implementation of the VideoDecodeEngine Interface.
+ virtual void Initialize(AVStream* stream, Task* done_cb);
+ virtual void DecodeFrame(const Buffer& buffer, AVFrame* yuv_frame,
+ bool* got_result, Task* done_cb);
+ virtual void Flush(Task* done_cb);
+ virtual VideoSurface::Format GetSurfaceFormat() const;
+
+ virtual State state() const { return state_; }
+
+ virtual AVCodecContext* codec_context() const { return codec_context_; }
+
+ virtual void SetCodecContextForTest(AVCodecContext* context) {
+ codec_context_ = context;
+ }
+
+ private:
+ AVCodecContext* codec_context_;
+ State state_;
+
+ DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecodeEngine);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_FFMPEG_VIDEO_DECODE_ENGINE_H_
diff --git a/media/filters/ffmpeg_video_decode_engine_unittest.cc b/media/filters/ffmpeg_video_decode_engine_unittest.cc
index 76c43fd..4d56866 100644
--- a/media/filters/ffmpeg_video_decode_engine_unittest.cc
+++ b/media/filters/ffmpeg_video_decode_engine_unittest.cc
@@ -6,6 +6,7 @@
#include "media/base/data_buffer.h"
#include "media/base/mock_ffmpeg.h"
#include "media/base/mock_task.h"
+#include "media/filters/ffmpeg_video_decode_engine.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 6390216..453eda2 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -4,146 +4,25 @@
#include "media/filters/ffmpeg_video_decoder.h"
-#include "base/task.h"
-#include "base/waitable_event.h"
-#include "media/base/callback.h"
-#include "media/base/limits.h"
-#include "media/base/video_frame_impl.h"
-#include "media/ffmpeg/ffmpeg_util.h"
-#include "media/filters/ffmpeg_common.h"
-#include "media/filters/ffmpeg_demuxer.h"
+#include "media/base/media_format.h"
+#include "media/filters/ffmpeg_common.h" // For kFFmpegVideo.
+#include "media/filters/ffmpeg_video_decode_engine.h"
namespace media {
-FFmpegVideoDecodeEngine::FFmpegVideoDecodeEngine()
- : codec_context_(NULL),
- state_(kCreated) {
+FFmpegVideoDecoder::FFmpegVideoDecoder(FFmpegVideoDecodeEngine* engine)
+ : VideoDecoderImpl(engine) {
}
-FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() {
-}
-
-void FFmpegVideoDecodeEngine::Initialize(AVStream* stream, Task* done_cb) {
- AutoTaskRunner done_runner(done_cb);
-
- // Always try to use two threads for video decoding. There is little reason
- // not to since current day CPUs tend to be multi-core and we measured
- // performance benefits on older machines such as P4s with hyperthreading.
- //
- // Handling decoding on separate threads also frees up the pipeline thread to
- // continue processing. Although it'd be nice to have the option of a single
- // decoding thread, FFmpeg treats having one thread the same as having zero
- // threads (i.e., avcodec_decode_video() will execute on the calling thread).
- // Yet another reason for having two threads :)
- //
- // TODO(scherkus): some video codecs might not like avcodec_thread_init()
- // being called on them... should attempt to find out which ones those are!
- static const int kDecodeThreads = 2;
-
- CHECK(state_ == kCreated);
-
- codec_context_ = stream->codec;
- codec_context_->flags2 |= CODEC_FLAG2_FAST; // Enable faster H264 decode.
- // Enable motion vector search (potentially slow), strong deblocking filter
- // for damaged macroblocks, and set our error detection sensitivity.
- codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
- codec_context_->error_recognition = FF_ER_CAREFUL;
-
- // Serialize calls to avcodec_open().
- AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
- {
- AutoLock auto_lock(FFmpegLock::get()->lock());
- if (codec &&
- avcodec_thread_init(codec_context_, kDecodeThreads) >= 0 &&
- avcodec_open(codec_context_, codec) >= 0) {
- state_ = kNormal;
- } else {
- state_ = kError;
- }
- }
-}
-
-// Decodes one frame of video with the given buffer.
-void FFmpegVideoDecodeEngine::DecodeFrame(const Buffer& buffer,
- AVFrame* yuv_frame,
- bool* got_frame,
- Task* done_cb) {
- AutoTaskRunner done_runner(done_cb);
-
- // Create a packet for input data.
- // Due to FFmpeg API changes we no longer have const read-only pointers.
- //
- // TODO(ajwong): This is dangerous since AVPacket may change size with
- // different ffmpeg versions. Use the alloca verison.
- AVPacket packet;
- av_init_packet(&packet);
- packet.data = const_cast<uint8*>(buffer.GetData());
- packet.size = buffer.GetDataSize();
-
- // We don't allocate AVFrame on the stack since different versions of FFmpeg
- // may change the size of AVFrame, causing stack corruption. The solution is
- // to let FFmpeg allocate the structure via avcodec_alloc_frame().
- int frame_decoded = 0;
- int result =
- avcodec_decode_video2(codec_context_, yuv_frame, &frame_decoded, &packet);
-
- // Log the problem if we can't decode a video frame and exit early.
- if (result < 0) {
- LOG(INFO) << "Error decoding a video frame with timestamp: "
- << buffer.GetTimestamp().InMicroseconds() << " us"
- << " , duration: "
- << buffer.GetDuration().InMicroseconds() << " us"
- << " , packet size: "
- << buffer.GetDataSize() << " bytes";
- *got_frame = false;
- } else {
- // If frame_decoded == 0, then no frame was produced.
- *got_frame = frame_decoded != 0;
- }
-}
-
-void FFmpegVideoDecodeEngine::Flush(Task* done_cb) {
- AutoTaskRunner done_runner(done_cb);
-
- avcodec_flush_buffers(codec_context_);
-}
-
-VideoSurface::Format FFmpegVideoDecodeEngine::GetSurfaceFormat() const {
- // J (Motion JPEG) versions of YUV are full range 0..255.
- // Regular (MPEG) YUV is 16..240.
- // For now we will ignore the distinction and treat them the same.
- switch (codec_context_->pix_fmt) {
- case PIX_FMT_YUV420P:
- case PIX_FMT_YUVJ420P:
- return VideoSurface::YV12;
- break;
- case PIX_FMT_YUV422P:
- case PIX_FMT_YUVJ422P:
- return VideoSurface::YV16;
- break;
- default:
- // TODO(scherkus): More formats here?
- return VideoSurface::INVALID;
- }
+FFmpegVideoDecoder::~FFmpegVideoDecoder() {
}
// static
FilterFactory* FFmpegVideoDecoder::CreateFactory() {
- return new FilterFactoryImpl1<FFmpegVideoDecoder, VideoDecodeEngine*>(
+ return new FilterFactoryImpl1<FFmpegVideoDecoder, FFmpegVideoDecodeEngine*>(
new FFmpegVideoDecodeEngine());
}
-FFmpegVideoDecoder::FFmpegVideoDecoder(VideoDecodeEngine* engine)
- : width_(0),
- height_(0),
- time_base_(new AVRational()),
- state_(kNormal),
- decode_engine_(engine) {
-}
-
-FFmpegVideoDecoder::~FFmpegVideoDecoder() {
-}
-
// static
bool FFmpegVideoDecoder::IsMediaFormatSupported(const MediaFormat& format) {
std::string mime_type;
@@ -151,290 +30,4 @@ bool FFmpegVideoDecoder::IsMediaFormatSupported(const MediaFormat& format) {
mime_type::kFFmpegVideo == mime_type;
}
-void FFmpegVideoDecoder::DoInitialize(DemuxerStream* demuxer_stream,
- bool* success,
- Task* done_cb) {
- AutoTaskRunner done_runner(done_cb);
- *success = false;
-
- // Get the AVStream by querying for the provider interface.
- AVStreamProvider* av_stream_provider;
- if (!demuxer_stream->QueryInterface(&av_stream_provider)) {
- return;
- }
- AVStream* av_stream = av_stream_provider->GetAVStream();
-
- *time_base_ = av_stream->time_base;
-
- // TODO(ajwong): We don't need these extra variables if |media_format_| has
- // them. Remove.
- width_ = av_stream->codec->width;
- height_ = av_stream->codec->height;
- if (width_ > Limits::kMaxDimension || height_ > Limits::kMaxDimension ||
- width_ * height_ > Limits::kMaxCanvas)
- return;
-
- media_format_.SetAsString(MediaFormat::kMimeType,
- mime_type::kUncompressedVideo);
- media_format_.SetAsInteger(MediaFormat::kWidth, width_);
- media_format_.SetAsInteger(MediaFormat::kHeight, height_);
-
- decode_engine_->Initialize(
- av_stream,
- NewRunnableMethod(this,
- &FFmpegVideoDecoder::OnInitializeComplete,
- success,
- done_runner.release()));
-}
-
-void FFmpegVideoDecoder::OnInitializeComplete(bool* success, Task* done_cb) {
- AutoTaskRunner done_runner(done_cb);
-
- *success = decode_engine_->state() == FFmpegVideoDecodeEngine::kNormal;
-}
-
-void FFmpegVideoDecoder::DoSeek(base::TimeDelta time, Task* done_cb) {
- // Everything in the presentation time queue is invalid, clear the queue.
- while (!pts_heap_.IsEmpty())
- pts_heap_.Pop();
-
- // We're back where we started. It should be completely safe to flush here
- // since DecoderBase uses |expecting_discontinuous_| to verify that the next
- // time DoDecode() is called we will have a discontinuous buffer.
- //
- // TODO(ajwong): Should we put a guard here to prevent leaving kError.
- state_ = kNormal;
-
- decode_engine_->Flush(done_cb);
-}
-
-void FFmpegVideoDecoder::DoDecode(Buffer* buffer, Task* done_cb) {
- AutoTaskRunner done_runner(done_cb);
-
- // TODO(ajwong): This DoDecode and OnDecodeComplete set of functions is too
- // complicated to easily unittest. The test becomes fragile. Try to find a
- // way to reorganize into smaller units for testing.
-
- // During decode, because reads are issued asynchronously, it is possible to
- // receive multiple end of stream buffers since each read is acked. When the
- // first end of stream buffer is read, FFmpeg may still have frames queued
- // up in the decoder so we need to go through the decode loop until it stops
- // giving sensible data. After that, the decoder should output empty
- // frames. There are three states the decoder can be in:
- //
- // kNormal: This is the starting state. Buffers are decoded. Decode errors
- // are discarded.
- // kFlushCodec: There isn't any more input data. Call avcodec_decode_video2
- // until no more data is returned to flush out remaining
- // frames. The input buffer is ignored at this point.
- // kDecodeFinished: All calls return empty frames.
- //
- // These are the possible state transitions.
- //
- // kNormal -> kFlushCodec:
- // When buffer->IsEndOfStream() is first true.
- // kNormal -> kDecodeFinished:
- // A catastrophic failure occurs, and decoding needs to stop.
- // kFlushCodec -> kDecodeFinished:
- // When avcodec_decode_video2() returns 0 data or errors out.
- // (any state) -> kNormal:
- // Any time buffer->IsDiscontinuous() is true.
- //
- // If the decoding is finished, we just always return empty frames.
- if (state_ == kDecodeFinished) {
- EnqueueEmptyFrame();
- return;
- }
-
- // Transition to kFlushCodec on the first end of stream buffer.
- if (state_ == kNormal && buffer->IsEndOfStream()) {
- state_ = kFlushCodec;
- }
-
- // Push all incoming timestamps into the priority queue as long as we have
- // not yet received an end of stream buffer. It is important that this line
- // stay below the state transition into kFlushCodec done above.
- //
- // TODO(ajwong): This push logic, along with the pop logic below needs to
- // be reevaluated to correctly handle decode errors.
- if (state_ == kNormal) {
- pts_heap_.Push(buffer->GetTimestamp());
- }
-
- // Otherwise, attempt to decode a single frame.
- AVFrame* yuv_frame = avcodec_alloc_frame();
- bool* got_frame = new bool;
- decode_engine_->DecodeFrame(
- *buffer,
- yuv_frame,
- got_frame,
- NewRunnableMethod(this,
- &FFmpegVideoDecoder::OnDecodeComplete,
- yuv_frame,
- got_frame,
- done_runner.release()));
-}
-
-void FFmpegVideoDecoder::OnDecodeComplete(AVFrame* yuv_frame, bool* got_frame,
- Task* done_cb) {
- // Note: The |done_runner| must be declared *last* to ensure proper
- // destruction order.
- scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> yuv_frame_deleter(yuv_frame);
- scoped_ptr<bool> got_frame_deleter(got_frame);
- AutoTaskRunner done_runner(done_cb);
-
- // If we actually got data back, enqueue a frame.
- if (*got_frame) {
- last_pts_ = FindPtsAndDuration(*time_base_, pts_heap_, last_pts_,
- yuv_frame);
-
- // Pop off a pts on a successful decode since we are "using up" one
- // timestamp.
- //
- // TODO(ajwong): Do we need to pop off a pts when avcodec_decode_video2()
- // returns < 0? The rationale is that when get_picture_ptr == 0, we skip
- // popping a pts because no frame was produced. However, when
- // avcodec_decode_video2() returns false, it is a decode error, which
- // if it means a frame is dropped, may require us to pop one more time.
- if (!pts_heap_.IsEmpty()) {
- pts_heap_.Pop();
- } else {
- NOTREACHED() << "Attempting to decode more frames than were input.";
- }
-
- if (!EnqueueVideoFrame(
- decode_engine_->GetSurfaceFormat(), last_pts_, yuv_frame)) {
- // On an EnqueueEmptyFrame error, error out the whole pipeline and
- // set the state to kDecodeFinished.
- SignalPipelineError();
- }
- } else {
- // When in kFlushCodec, any errored decode, or a 0-lengthed frame,
- // is taken as a signal to stop decoding.
- if (state_ == kFlushCodec) {
- state_ = kDecodeFinished;
- EnqueueEmptyFrame();
- }
- }
-}
-
-bool FFmpegVideoDecoder::EnqueueVideoFrame(VideoSurface::Format surface_format,
- const TimeTuple& time,
- const AVFrame* frame) {
- // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
- // The decoder is in a bad state and not decoding correctly.
- // Checking for NULL avoids a crash in CopyPlane().
- if (!frame->data[VideoSurface::kYPlane] ||
- !frame->data[VideoSurface::kUPlane] ||
- !frame->data[VideoSurface::kVPlane]) {
- return true;
- }
-
- scoped_refptr<VideoFrame> video_frame;
- VideoFrameImpl::CreateFrame(surface_format, width_, height_,
- time.timestamp, time.duration, &video_frame);
- if (!video_frame) {
- return false;
- }
-
- // Copy the frame data since FFmpeg reuses internal buffers for AVFrame
- // output, meaning the data is only valid until the next
- // avcodec_decode_video() call.
- // TODO(scherkus): figure out pre-allocation/buffer cycling scheme.
- // TODO(scherkus): is there a cleaner way to figure out the # of planes?
- VideoSurface surface;
- if (!video_frame->Lock(&surface)) {
- return false;
- }
- CopyPlane(VideoSurface::kYPlane, surface, frame);
- CopyPlane(VideoSurface::kUPlane, surface, frame);
- CopyPlane(VideoSurface::kVPlane, surface, frame);
- video_frame->Unlock();
- EnqueueResult(video_frame);
- return true;
-}
-
-void FFmpegVideoDecoder::CopyPlane(size_t plane,
- const VideoSurface& surface,
- const AVFrame* frame) {
- DCHECK(surface.width % 2 == 0);
- const uint8* source = frame->data[plane];
- const size_t source_stride = frame->linesize[plane];
- uint8* dest = surface.data[plane];
- const size_t dest_stride = surface.strides[plane];
- size_t bytes_per_line = surface.width;
- size_t copy_lines = surface.height;
- if (plane != VideoSurface::kYPlane) {
- bytes_per_line /= 2;
- if (surface.format == VideoSurface::YV12) {
- copy_lines = (copy_lines + 1) / 2;
- }
- }
- DCHECK(bytes_per_line <= source_stride && bytes_per_line <= dest_stride);
- for (size_t i = 0; i < copy_lines; ++i) {
- memcpy(dest, source, bytes_per_line);
- source += source_stride;
- dest += dest_stride;
- }
-}
-
-void FFmpegVideoDecoder::EnqueueEmptyFrame() {
- scoped_refptr<VideoFrame> video_frame;
- VideoFrameImpl::CreateEmptyFrame(&video_frame);
- EnqueueResult(video_frame);
-}
-
-FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoder::FindPtsAndDuration(
- const AVRational& time_base,
- const PtsHeap& pts_heap,
- const TimeTuple& last_pts,
- const AVFrame* frame) {
- TimeTuple pts;
-
- // Default repeat_pict to 0 because if there is no frame information,
- // we just assume the frame only plays for one time_base.
- int repeat_pict = 0;
-
- // First search the AVFrame for the pts. This is the most authoritative.
- // Make a special exclusion for the value frame->pts == 0. Though this
- // is technically a valid value, it seems a number of ffmpeg codecs will
- // mistakenly always set frame->pts to 0.
- //
- // Oh, and we have to cast AV_NOPTS_VALUE since it ends up becoming unsigned
- // because the value they use doesn't fit in a signed 64-bit number which
- // produces a signedness comparison warning on gcc.
- if (frame &&
- (frame->pts != static_cast<int64_t>(AV_NOPTS_VALUE)) &&
- (frame->pts != 0)) {
- pts.timestamp = ConvertTimestamp(time_base, frame->pts);
- repeat_pict = frame->repeat_pict;
- } else if (!pts_heap.IsEmpty()) {
- // If the frame did not have pts, try to get the pts from the
- // |pts_heap|.
- pts.timestamp = pts_heap.Top();
- } else {
- DCHECK(last_pts.timestamp != StreamSample::kInvalidTimestamp);
- DCHECK(last_pts.duration != StreamSample::kInvalidTimestamp);
- // Unable to read the pts from anywhere. Time to guess.
- pts.timestamp = last_pts.timestamp + last_pts.duration;
- }
-
- // Fill in the duration while accounting for repeated frames.
- //
- // TODO(ajwong): Make sure this formula is correct.
- pts.duration = ConvertTimestamp(time_base, 1 + repeat_pict);
-
- return pts;
-}
-
-void FFmpegVideoDecoder::SignalPipelineError() {
- host()->SetError(PIPELINE_ERROR_DECODE);
- state_ = kDecodeFinished;
-}
-
-void FFmpegVideoDecoder::SetVideoDecodeEngineForTest(
- VideoDecodeEngine* engine) {
- decode_engine_.reset(engine);
-}
-
} // namespace media
diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h
index b35e173..8fbb74e 100644
--- a/media/filters/ffmpeg_video_decoder.h
+++ b/media/filters/ffmpeg_video_decoder.h
@@ -5,135 +5,21 @@
#ifndef MEDIA_FILTERS_FFMPEG_VIDEO_DECODER_H_
#define MEDIA_FILTERS_FFMPEG_VIDEO_DECODER_H_
-#include <queue>
-
-#include "media/base/factory.h"
-#include "media/base/pts_heap.h"
-#include "media/filters/decoder_base.h"
-#include "media/filters/video_decode_engine.h"
-#include "testing/gtest/include/gtest/gtest_prod.h"
-
-// FFmpeg types.
-struct AVCodecContext;
-struct AVFrame;
-struct AVRational;
-struct AVStream;
+#include "media/filters/video_decoder_impl.h"
namespace media {
-class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
- public:
- FFmpegVideoDecodeEngine();
- virtual ~FFmpegVideoDecodeEngine();
-
- // Implementation of the VideoDecodeEngine Interface.
- virtual void Initialize(AVStream* stream, Task* done_cb);
- virtual void DecodeFrame(const Buffer& buffer, AVFrame* yuv_frame,
- bool* got_result, Task* done_cb);
- virtual void Flush(Task* done_cb);
- virtual VideoSurface::Format GetSurfaceFormat() const;
-
- virtual State state() const { return state_; }
-
- virtual AVCodecContext* codec_context() const { return codec_context_; }
-
- virtual void SetCodecContextForTest(AVCodecContext* context) {
- codec_context_ = context;
- }
+class FFmpegVideoDecodeEngine;
+class FilterFactory;
+class MediaFormat;
- private:
- AVCodecContext* codec_context_;
- State state_;
-
- DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecodeEngine);
-};
-
-class FFmpegVideoDecoder : public DecoderBase<VideoDecoder, VideoFrame> {
+class FFmpegVideoDecoder : public VideoDecoderImpl {
public:
static FilterFactory* CreateFactory();
static bool IsMediaFormatSupported(const MediaFormat& media_format);
- private:
- friend class FilterFactoryImpl1<FFmpegVideoDecoder, VideoDecodeEngine*>;
- friend class DecoderPrivateMock;
- friend class FFmpegVideoDecoderTest;
- FRIEND_TEST(FFmpegVideoDecoderTest, FindPtsAndDuration);
- FRIEND_TEST(FFmpegVideoDecoderTest, DoDecode_EnqueueVideoFrameError);
- FRIEND_TEST(FFmpegVideoDecoderTest, DoDecode_FinishEnqueuesEmptyFrames);
- FRIEND_TEST(FFmpegVideoDecoderTest, DoDecode_TestStateTransition);
- FRIEND_TEST(FFmpegVideoDecoderTest, DoSeek);
-
- // The TimeTuple struct is used to hold the needed timestamp data needed for
- // enqueuing a video frame.
- struct TimeTuple {
- base::TimeDelta timestamp;
- base::TimeDelta duration;
- };
-
- FFmpegVideoDecoder(VideoDecodeEngine* engine);
+ FFmpegVideoDecoder(FFmpegVideoDecodeEngine* engine);
virtual ~FFmpegVideoDecoder();
-
- // Implement DecoderBase template methods.
- virtual void DoInitialize(DemuxerStream* demuxer_stream, bool* success,
- Task* done_cb);
- virtual void DoSeek(base::TimeDelta time, Task* done_cb);
- virtual void DoDecode(Buffer* buffer, Task* done_cb);
-
- virtual bool EnqueueVideoFrame(VideoSurface::Format surface_format,
- const TimeTuple& time,
- const AVFrame* frame);
-
- // Create an empty video frame and queue it.
- virtual void EnqueueEmptyFrame();
-
- virtual void CopyPlane(size_t plane, const VideoSurface& surface,
- const AVFrame* frame);
-
- // Methods that pickup after the decode engine has finished its action.
- virtual void OnInitializeComplete(bool* success /* Not owned */,
- Task* done_cb);
- virtual void OnDecodeComplete(AVFrame* yuv_frame, bool* got_frame,
- Task* done_cb);
-
- // Attempt to get the PTS and Duration for this frame by examining the time
- // info provided via packet stream (stored in |pts_heap|), or the info
- // writen into the AVFrame itself. If no data is available in either, then
- // attempt to generate a best guess of the pts based on the last known pts.
- //
- // Data inside the AVFrame (if provided) is trusted the most, followed
- // by data from the packet stream. Estimation based on the |last_pts| is
- // reserved as a last-ditch effort.
- virtual TimeTuple FindPtsAndDuration(const AVRational& time_base,
- const PtsHeap& pts_heap,
- const TimeTuple& last_pts,
- const AVFrame* frame);
-
- // Signals the pipeline that a decode error occurs, and moves the decoder
- // into the kDecodeFinished state.
- virtual void SignalPipelineError();
-
- // Injection point for unittest to provide a mock engine. Takes ownership of
- // the provided engine.
- virtual void SetVideoDecodeEngineForTest(VideoDecodeEngine* engine);
-
- size_t width_;
- size_t height_;
-
- PtsHeap pts_heap_; // Heap of presentation timestamps.
- TimeTuple last_pts_;
- scoped_ptr<AVRational> time_base_; // Pointer to avoid needing full type.
-
- enum DecoderState {
- kNormal,
- kFlushCodec,
- kDecodeFinished,
- };
-
- DecoderState state_;
-
- scoped_ptr<VideoDecodeEngine> decode_engine_;
-
- DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecoder);
};
} // namespace media
diff --git a/media/filters/omx_video_decode_engine.cc b/media/filters/omx_video_decode_engine.cc
new file mode 100644
index 0000000..3404f3f
--- /dev/null
+++ b/media/filters/omx_video_decode_engine.cc
@@ -0,0 +1,166 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/omx_video_decode_engine.h"
+
+#include "media/base/callback.h"
+#include "media/filters/ffmpeg_common.h"
+#include "media/omx/input_buffer.h"
+#include "media/omx/omx_codec.h"
+
+namespace media {
+
+OmxVideoDecodeEngine::OmxVideoDecodeEngine()
+ : state_(kCreated),
+ frame_bytes_(0),
+ has_fed_on_eos_(false),
+ message_loop_(NULL) {
+}
+
+OmxVideoDecodeEngine::~OmxVideoDecodeEngine() {
+}
+
+void OmxVideoDecodeEngine::Initialize(AVStream* stream, Task* done_cb) {
+ AutoTaskRunner done_runner(done_cb);
+ omx_codec_ = new media::OmxCodec(message_loop_);
+
+ width_ = stream->codec->width;
+ height_ = stream->codec->height;
+
+ // TODO(ajwong): Extract magic formula to something based on output
+ // pixel format.
+ frame_bytes_ = (width_ * height_ * 3) / 2;
+
+ // TODO(ajwong): Find the right way to determine the Omx component name.
+ omx_codec_->Setup("OMX.st.video_decoder.avc", OmxCodec::kCodecH264);
+ omx_codec_->SetErrorCallback(
+ NewCallback(this, &OmxVideoDecodeEngine::OnHardwareError));
+ omx_codec_->Start();
+ state_ = kNormal;
+}
+
+void OmxVideoDecodeEngine::OnHardwareError() {
+ // TODO(ajwong): Threading?
+ state_ = kError;
+}
+
+void OmxVideoDecodeEngine::DecodeFrame(const Buffer& buffer,
+ AVFrame* yuv_frame,
+ bool* got_result,
+ Task* done_cb) {
+ AutoTaskRunner done_runner(done_cb);
+
+ if (state_ != kNormal) {
+ return;
+ }
+
+ if (!has_fed_on_eos_) {
+ // TODO(ajwong): This is a memcpy() of the compressed frame. Avoid?
+ uint8* data = new uint8[buffer.GetDataSize()];
+ memcpy(data, buffer.GetData(), buffer.GetDataSize());
+
+ InputBuffer* input_buffer = new InputBuffer(data, buffer.GetDataSize());
+
+ // Feed in the new buffer regardless.
+ //
+ // TODO(ajwong): This callback stuff is messy. Cleanup.
+ CleanupCallback<OmxCodec::FeedCallback>* feed_done =
+ new CleanupCallback<OmxCodec::FeedCallback>(
+ NewCallback(this, &OmxVideoDecodeEngine::OnFeedDone));
+ feed_done->DeleteWhenDone(input_buffer);
+ omx_codec_->Feed(input_buffer, feed_done);
+
+ if (buffer.IsEndOfStream()) {
+ has_fed_on_eos_ = true;
+ }
+ }
+
+ omx_codec_->Read(NewCallback(this, &OmxVideoDecodeEngine::OnReadComplete));
+
+ if (DecodedFrameAvailable()) {
+ scoped_ptr<YuvFrame> frame(GetFrame());
+
+ // TODO(ajwong): This is a memcpy(). Avoid this.
+ // TODO(ajwong): This leaks memory. Fix by not using AVFrame.
+ const size_t frame_pixels = width_ * height_;
+ yuv_frame->data[0] = new uint8_t[frame_pixels];
+ yuv_frame->data[1] = new uint8_t[frame_pixels / 4];
+ yuv_frame->data[2] = new uint8_t[frame_pixels / 4];
+ yuv_frame->linesize[0] = width_;
+ yuv_frame->linesize[1] = width_ / 2;
+ yuv_frame->linesize[2] = width_ / 2;
+
+ memcpy(yuv_frame->data[0], frame->data, frame_pixels);
+ memcpy(yuv_frame->data[1], frame->data + frame_pixels, frame_pixels / 4);
+ memcpy(yuv_frame->data[2],
+ frame->data + frame_pixels + frame_pixels/4,
+ frame_pixels / 4);
+ }
+}
+
+void OmxVideoDecodeEngine::OnFeedDone(InputBuffer* buffer) {
+ // TODO(ajwong): Add a DoNothingCallback or similar.
+}
+
+void OmxVideoDecodeEngine::Flush(Task* done_cb) {
+ AutoLock lock(lock_);
+ omx_codec_->Flush(TaskToCallbackAdapter::NewCallback(done_cb));
+}
+
+VideoSurface::Format OmxVideoDecodeEngine::GetSurfaceFormat() const {
+ return VideoSurface::YV12;
+}
+
+void OmxVideoDecodeEngine::Stop(Callback0::Type* done_cb) {
+ AutoLock lock(lock_);
+ omx_codec_->Stop(done_cb);
+ state_ = kStopped;
+}
+
+void OmxVideoDecodeEngine::OnReadComplete(uint8* buffer, int size) {
+ if ((size_t)size != frame_bytes_) {
+ LOG(ERROR) << "Read completed with weird size: " << size;
+ }
+ MergeBytesFrameQueue(buffer, size);
+}
+
+bool OmxVideoDecodeEngine::IsFrameComplete(const YuvFrame* frame) {
+ return frame->size == frame_bytes_;
+}
+
+bool OmxVideoDecodeEngine::DecodedFrameAvailable() {
+ AutoLock lock(lock_);
+ return (!yuv_frame_queue_.empty() &&
+ IsFrameComplete(yuv_frame_queue_.front()));
+}
+
+void OmxVideoDecodeEngine::MergeBytesFrameQueue(uint8* buffer, int size) {
+ AutoLock lock(lock_);
+ int amount_left = size;
+
+ // TODO(ajwong): Do the swizzle here instead of in DecodeFrame. This
+ // should be able to avoid 1 memcpy.
+ while (amount_left > 0) {
+ if (yuv_frame_queue_.empty() || IsFrameComplete(yuv_frame_queue_.back())) {
+ yuv_frame_queue_.push_back(new YuvFrame(frame_bytes_));
+ }
+ YuvFrame* frame = yuv_frame_queue_.back();
+ int amount_to_copy = std::min((int)(frame_bytes_ - frame->size), size);
+ frame->size += amount_to_copy;
+ memcpy(frame->data, buffer, amount_to_copy);
+ amount_left -= amount_to_copy;
+ }
+}
+
+OmxVideoDecodeEngine::YuvFrame* OmxVideoDecodeEngine::GetFrame() {
+ AutoLock lock(lock_);
+ if (yuv_frame_queue_.empty()) {
+ return NULL;
+ }
+ YuvFrame* frame = yuv_frame_queue_.front();
+ yuv_frame_queue_.pop_front();
+ return frame;
+}
+
+} // namespace media
diff --git a/media/filters/omx_video_decode_engine.h b/media/filters/omx_video_decode_engine.h
new file mode 100644
index 0000000..956141c
--- /dev/null
+++ b/media/filters/omx_video_decode_engine.h
@@ -0,0 +1,95 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_OMX_VIDEO_DECODE_ENGINE_H_
+#define MEDIA_FILTERS_OMX_VIDEO_DECODE_ENGINE_H_
+
+#include <list>
+
+#include "base/lock.h"
+#include "base/task.h"
+#include "media/filters/video_decode_engine.h"
+
+class MessageLoop;
+
+// FFmpeg types.
+struct AVFrame;
+struct AVRational;
+struct AVStream;
+
+namespace media {
+
+class InputBuffer;
+class OmxCodec;
+
+class OmxVideoDecodeEngine : public VideoDecodeEngine {
+ public:
+ OmxVideoDecodeEngine();
+ virtual ~OmxVideoDecodeEngine();
+
+ // Implementation of the VideoDecodeEngine Interface.
+ virtual void Initialize(AVStream* stream, Task* done_cb);
+ virtual void DecodeFrame(const Buffer& buffer, AVFrame* yuv_frame,
+ bool* got_result, Task* done_cb);
+ virtual void Flush(Task* done_cb);
+ virtual VideoSurface::Format GetSurfaceFormat() const;
+
+ virtual State state() const { return state_; }
+
+ // Stops the engine.
+ //
+ // TODO(ajwong): Normalize this interface with Task like the others, and
+ // promote to the abstract interface.
+ virtual void Stop(Callback0::Type* done_cb);
+
+ virtual void set_message_loop(MessageLoop* message_loop) {
+ message_loop_ = message_loop;
+ };
+
+ private:
+ struct YuvFrame {
+ // TODO(ajwong): Please avoid ever using this class anywhere else until
+ // we've consolidated the buffer struct.
+ YuvFrame(size_t c) {
+ size = 0;
+ capacity = c;
+ data = new unsigned char[capacity];
+ }
+ ~YuvFrame() {
+ delete [] data;
+ }
+ size_t size;
+ size_t capacity;
+ unsigned char* data;
+ };
+
+ virtual void OnFeedDone(InputBuffer* buffer);
+ virtual void OnHardwareError();
+ virtual void OnReadComplete(uint8* buffer, int size);
+
+ virtual bool DecodedFrameAvailable();
+ virtual void MergeBytesFrameQueue(uint8* buffer, int size);
+ virtual bool IsFrameComplete(const YuvFrame* frame);
+ virtual YuvFrame* GetFrame();
+
+ Lock lock_; // Locks the |state_| variable and the |yuv_frame_queue_|.
+ State state_;
+ size_t frame_bytes_;
+ size_t width_;
+ size_t height_;
+
+ bool has_fed_on_eos_; // Used to avoid sending an end of stream to
+ // OpenMax twice since OpenMax does not always
+ // handle this nicely.
+ std::list<YuvFrame*> yuv_frame_queue_;
+
+ scoped_refptr<media::OmxCodec> omx_codec_;
+ MessageLoop* message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(OmxVideoDecodeEngine);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_OMX_VIDEO_DECODE_ENGINE_H_
diff --git a/media/filters/omx_video_decoder.cc b/media/filters/omx_video_decoder.cc
new file mode 100644
index 0000000..87baed7
--- /dev/null
+++ b/media/filters/omx_video_decoder.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/omx_video_decoder.h"
+
+#include "base/waitable_event.h"
+#include "media/filters/ffmpeg_common.h"
+#include "media/filters/omx_video_decode_engine.h"
+
+namespace media {
+
+// static
+FilterFactory* OmxVideoDecoder::CreateFactory() {
+ return new FilterFactoryImpl1<OmxVideoDecoder, OmxVideoDecodeEngine*>(
+ new OmxVideoDecodeEngine());
+}
+
+// static
+bool OmxVideoDecoder::IsMediaFormatSupported(const MediaFormat& format) {
+ std::string mime_type;
+ if (!format.GetAsString(MediaFormat::kMimeType, &mime_type) ||
+ mime_type::kFFmpegVideo != mime_type) {
+ return false;
+ }
+
+ // TODO(ajwong): Find a good way to white-list formats that OpenMAX can
+ // handle.
+ int codec_id;
+ if (format.GetAsInteger(MediaFormat::kFFmpegCodecID, &codec_id) &&
+ codec_id == CODEC_ID_H264) {
+ return true;
+ }
+
+ return false;
+}
+
+OmxVideoDecoder::OmxVideoDecoder(OmxVideoDecodeEngine* engine)
+ : VideoDecoderImpl(engine),
+ omx_engine_(engine) {
+}
+
+OmxVideoDecoder::~OmxVideoDecoder() {
+}
+
+void OmxVideoDecoder::set_message_loop(MessageLoop* message_loop) {
+ // TODO(ajwong): Is there a way around needing to propagate the message loop?
+ VideoDecoderImpl::set_message_loop(message_loop);
+ omx_engine_->set_message_loop(message_loop);
+}
+
+void OmxVideoDecoder::Stop() {
+ // TODO(ajwong): This is a total hack. Make async.
+ base::WaitableEvent event(false, false);
+ omx_engine_->Stop(NewCallback(&event, &base::WaitableEvent::Signal));
+ event.Wait();
+}
+
+} // namespace media
diff --git a/media/filters/omx_video_decoder.h b/media/filters/omx_video_decoder.h
new file mode 100644
index 0000000..e01ce5f
--- /dev/null
+++ b/media/filters/omx_video_decoder.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_OMX_VIDEO_DECODER_H_
+#define MEDIA_FILTERS_OMX_VIDEO_DECODER_H_
+
+#include "media/filters/video_decoder_impl.h"
+
+class MessageLoop;
+
+namespace media {
+
+class FilterFactory;
+class MediaFormat;
+class OmxVideoDecodeEngine;
+
+class OmxVideoDecoder : public VideoDecoderImpl {
+ public:
+ static FilterFactory* CreateFactory();
+ static bool IsMediaFormatSupported(const MediaFormat& media_format);
+
+ OmxVideoDecoder(OmxVideoDecodeEngine* engine);
+ virtual ~OmxVideoDecoder();
+
+ virtual void set_message_loop(MessageLoop* message_loop);
+ virtual void Stop();
+
+ private:
+ OmxVideoDecodeEngine* omx_engine_;
+
+ DISALLOW_COPY_AND_ASSIGN(OmxVideoDecoder);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_OMX_VIDEO_DECODER_H_
diff --git a/media/filters/video_decode_engine.h b/media/filters/video_decode_engine.h
index f7db194..9cfbe97 100644
--- a/media/filters/video_decode_engine.h
+++ b/media/filters/video_decode_engine.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_FILTERS_VIDEO_DECODE_ENGINE_H_
#define MEDIA_FILTERS_VIDEO_DECODE_ENGINE_H_
+#include "media/base/buffers.h" // For VideoSurface.
+
// FFmpeg types.
//
// TODO(ajwong): Try to cut the dependency on the FFmpeg types.
@@ -22,6 +24,7 @@ class VideoDecodeEngine {
enum State {
kCreated,
kNormal,
+ kStopped,
kError,
};
diff --git a/media/filters/video_decoder_impl.cc b/media/filters/video_decoder_impl.cc
new file mode 100644
index 0000000..8390671
--- /dev/null
+++ b/media/filters/video_decoder_impl.cc
@@ -0,0 +1,317 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/video_decoder_impl.h"
+
+#include "base/task.h"
+#include "media/base/filters.h"
+#include "media/base/limits.h"
+#include "media/base/video_frame_impl.h"
+#include "media/filters/ffmpeg_common.h"
+#include "media/filters/ffmpeg_interfaces.h"
+#include "media/filters/video_decode_engine.h"
+#include "media/ffmpeg/ffmpeg_util.h"
+
+namespace media {
+
+VideoDecoderImpl::VideoDecoderImpl(VideoDecodeEngine* engine)
+ : width_(0),
+ height_(0),
+ time_base_(new AVRational()),
+ state_(kNormal),
+ decode_engine_(engine) {
+}
+
+VideoDecoderImpl::~VideoDecoderImpl() {
+}
+
+void VideoDecoderImpl::DoInitialize(DemuxerStream* demuxer_stream,
+ bool* success,
+ Task* done_cb) {
+ AutoTaskRunner done_runner(done_cb);
+ *success = false;
+
+ // Get the AVStream by querying for the provider interface.
+ AVStreamProvider* av_stream_provider;
+ if (!demuxer_stream->QueryInterface(&av_stream_provider)) {
+ return;
+ }
+ AVStream* av_stream = av_stream_provider->GetAVStream();
+
+ *time_base_ = av_stream->time_base;
+
+ // TODO(ajwong): We don't need these extra variables if |media_format_| has
+ // them. Remove.
+ width_ = av_stream->codec->width;
+ height_ = av_stream->codec->height;
+ if (width_ > Limits::kMaxDimension ||
+ height_ > Limits::kMaxDimension ||
+ (width_ * height_) > Limits::kMaxCanvas) {
+ return;
+ }
+
+ media_format_.SetAsString(MediaFormat::kMimeType,
+ mime_type::kUncompressedVideo);
+ media_format_.SetAsInteger(MediaFormat::kWidth, width_);
+ media_format_.SetAsInteger(MediaFormat::kHeight, height_);
+
+ decode_engine_->Initialize(
+ av_stream,
+ NewRunnableMethod(this,
+ &VideoDecoderImpl::OnInitializeComplete,
+ success,
+ done_runner.release()));
+}
+
+void VideoDecoderImpl::OnInitializeComplete(bool* success, Task* done_cb) {
+ AutoTaskRunner done_runner(done_cb);
+
+ *success = decode_engine_->state() == VideoDecodeEngine::kNormal;
+}
+
+void VideoDecoderImpl::DoSeek(base::TimeDelta time, Task* done_cb) {
+ // Everything in the presentation time queue is invalid, clear the queue.
+ while (!pts_heap_.IsEmpty())
+ pts_heap_.Pop();
+
+ // We're back where we started. It should be completely safe to flush here
+ // since DecoderBase uses |expecting_discontinuous_| to verify that the next
+ // time DoDecode() is called we will have a discontinuous buffer.
+ //
+ // TODO(ajwong): Should we put a guard here to prevent leaving kError?
+ state_ = kNormal;
+
+ decode_engine_->Flush(done_cb);
+}
+
+void VideoDecoderImpl::DoDecode(Buffer* buffer, Task* done_cb) {
+ AutoTaskRunner done_runner(done_cb);
+
+ // TODO(ajwong): This DoDecode() and OnDecodeComplete() set of functions is
+ // too complicated to easily unittest. The test becomes fragile. Try to
+ // find a way to reorganize into smaller units for testing.
+
+ // During decode, because reads are issued asynchronously, it is possible to
+ // receive multiple end of stream buffers since each read is acked. When the
+ // first end of stream buffer is read, FFmpeg may still have frames queued
+ // up in the decoder so we need to go through the decode loop until it stops
+ // giving sensible data. After that, the decoder should output empty
+ // frames. There are three states the decoder can be in:
+ //
+ // kNormal: This is the starting state. Buffers are decoded. Decode errors
+ // are discarded.
+ // kFlushCodec: There isn't any more input data. Call avcodec_decode_video2
+ // until no more data is returned to flush out remaining
+ // frames. The input buffer is ignored at this point.
+ // kDecodeFinished: All calls return empty frames.
+ //
+ // These are the possible state transitions.
+ //
+ // kNormal -> kFlushCodec:
+ // When buffer->IsEndOfStream() is first true.
+ // kNormal -> kDecodeFinished:
+ // A catastrophic failure occurs, and decoding needs to stop.
+ // kFlushCodec -> kDecodeFinished:
+ // When avcodec_decode_video2() returns 0 data or errors out.
+ // (any state) -> kNormal:
+ // Any time buffer->IsDiscontinuous() is true.
+ //
+ // If the decoding is finished, we just always return empty frames.
+ if (state_ == kDecodeFinished) {
+ EnqueueEmptyFrame();
+ return;
+ }
+
+ // Transition to kFlushCodec on the first end of stream buffer.
+ if (state_ == kNormal && buffer->IsEndOfStream()) {
+ state_ = kFlushCodec;
+ }
+
+ // Push all incoming timestamps into the priority queue as long as we have
+ // not yet received an end of stream buffer. It is important that this line
+ // stay below the state transition into kFlushCodec done above.
+ //
+ // TODO(ajwong): This push logic, along with the pop logic below needs to
+ // be reevaluated to correctly handle decode errors.
+ if (state_ == kNormal) {
+ pts_heap_.Push(buffer->GetTimestamp());
+ }
+
+ // Otherwise, attempt to decode a single frame.
+ AVFrame* yuv_frame = avcodec_alloc_frame();
+ bool* got_frame = new bool;
+ decode_engine_->DecodeFrame(
+ *buffer,
+ yuv_frame,
+ got_frame,
+ NewRunnableMethod(this,
+ &VideoDecoderImpl::OnDecodeComplete,
+ yuv_frame,
+ got_frame,
+ done_runner.release()));
+}
+
+void VideoDecoderImpl::OnDecodeComplete(AVFrame* yuv_frame, bool* got_frame,
+ Task* done_cb) {
+ // Note: The |done_runner| must be declared *last* to ensure proper
+ // destruction order.
+ scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> yuv_frame_deleter(yuv_frame);
+ scoped_ptr<bool> got_frame_deleter(got_frame);
+ AutoTaskRunner done_runner(done_cb);
+
+ // If we actually got data back, enqueue a frame.
+ if (*got_frame) {
+ last_pts_ = FindPtsAndDuration(*time_base_, pts_heap_, last_pts_,
+ yuv_frame);
+
+ // Pop off a pts on a successful decode since we are "using up" one
+ // timestamp.
+ //
+ // TODO(ajwong): Do we need to pop off a pts when avcodec_decode_video2()
+ // returns < 0? The rationale is that when get_picture_ptr == 0, we skip
+ // popping a pts because no frame was produced. However, when
+ // avcodec_decode_video2() returns false, it is a decode error, which
+ // if it means a frame is dropped, may require us to pop one more time.
+ if (!pts_heap_.IsEmpty()) {
+ pts_heap_.Pop();
+ } else {
+ NOTREACHED() << "Attempting to decode more frames than were input.";
+ }
+
+ if (!EnqueueVideoFrame(
+ decode_engine_->GetSurfaceFormat(), last_pts_, yuv_frame)) {
+ // On an EnqueueEmptyFrame error, error out the whole pipeline and
+ // set the state to kDecodeFinished.
+ SignalPipelineError();
+ }
+ } else {
+ // When in kFlushCodec, any errored decode, or a 0-lengthed frame,
+ // is taken as a signal to stop decoding.
+ if (state_ == kFlushCodec) {
+ state_ = kDecodeFinished;
+ EnqueueEmptyFrame();
+ }
+ }
+}
+
+bool VideoDecoderImpl::EnqueueVideoFrame(VideoSurface::Format surface_format,
+ const TimeTuple& time,
+ const AVFrame* frame) {
+ // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
+ // The decoder is in a bad state and not decoding correctly.
+ // Checking for NULL avoids a crash in CopyPlane().
+ if (!frame->data[VideoSurface::kYPlane] ||
+ !frame->data[VideoSurface::kUPlane] ||
+ !frame->data[VideoSurface::kVPlane]) {
+ return true;
+ }
+
+ scoped_refptr<VideoFrame> video_frame;
+ VideoFrameImpl::CreateFrame(surface_format, width_, height_,
+ time.timestamp, time.duration, &video_frame);
+ if (!video_frame) {
+ return false;
+ }
+
+ // Copy the frame data since FFmpeg reuses internal buffers for AVFrame
+ // output, meaning the data is only valid until the next
+ // avcodec_decode_video() call.
+ // TODO(scherkus): figure out pre-allocation/buffer cycling scheme.
+ // TODO(scherkus): is there a cleaner way to figure out the # of planes?
+ VideoSurface surface;
+ if (!video_frame->Lock(&surface)) {
+ return false;
+ }
+ CopyPlane(VideoSurface::kYPlane, surface, frame);
+ CopyPlane(VideoSurface::kUPlane, surface, frame);
+ CopyPlane(VideoSurface::kVPlane, surface, frame);
+ video_frame->Unlock();
+ EnqueueResult(video_frame);
+ return true;
+}
+
+void VideoDecoderImpl::CopyPlane(size_t plane,
+ const VideoSurface& surface,
+ const AVFrame* frame) {
+ DCHECK(surface.width % 2 == 0);
+ const uint8* source = frame->data[plane];
+ const size_t source_stride = frame->linesize[plane];
+ uint8* dest = surface.data[plane];
+ const size_t dest_stride = surface.strides[plane];
+ size_t bytes_per_line = surface.width;
+ size_t copy_lines = surface.height;
+ if (plane != VideoSurface::kYPlane) {
+ bytes_per_line /= 2;
+ if (surface.format == VideoSurface::YV12) {
+ copy_lines = (copy_lines + 1) / 2;
+ }
+ }
+ DCHECK(bytes_per_line <= source_stride && bytes_per_line <= dest_stride);
+ for (size_t i = 0; i < copy_lines; ++i) {
+ memcpy(dest, source, bytes_per_line);
+ source += source_stride;
+ dest += dest_stride;
+ }
+}
+
+void VideoDecoderImpl::EnqueueEmptyFrame() {
+ scoped_refptr<VideoFrame> video_frame;
+ VideoFrameImpl::CreateEmptyFrame(&video_frame);
+ EnqueueResult(video_frame);
+}
+
+VideoDecoderImpl::TimeTuple VideoDecoderImpl::FindPtsAndDuration(
+ const AVRational& time_base,
+ const PtsHeap& pts_heap,
+ const TimeTuple& last_pts,
+ const AVFrame* frame) {
+ TimeTuple pts;
+
+ // Default |repeat_pict| to 0 because if there is no frame information,
+ // we just assume the frame only plays for one time_base.
+ int repeat_pict = 0;
+
+ // First search the AVFrame for the pts. This is the most authoritative.
+ // Make a special exclusion for the value frame->pts == 0. Though this
+ // is technically a valid value, it seems a number of ffmpeg codecs will
+ // mistakenly always set frame->pts to 0.
+ //
+ // Oh, and we have to cast AV_NOPTS_VALUE since it ends up becoming unsigned
+ // because the value they use doesn't fit in a signed 64-bit number which
+ // produces a signedness comparison warning on gcc.
+ if (frame &&
+ (frame->pts != static_cast<int64_t>(AV_NOPTS_VALUE)) &&
+ (frame->pts != 0)) {
+ pts.timestamp = ConvertTimestamp(time_base, frame->pts);
+ repeat_pict = frame->repeat_pict;
+ } else if (!pts_heap.IsEmpty()) {
+ // If the frame did not have pts, try to get the pts from the
+ // |pts_heap|.
+ pts.timestamp = pts_heap.Top();
+ } else {
+ DCHECK(last_pts.timestamp != StreamSample::kInvalidTimestamp);
+ DCHECK(last_pts.duration != StreamSample::kInvalidTimestamp);
+ // Unable to read the pts from anywhere. Time to guess.
+ pts.timestamp = last_pts.timestamp + last_pts.duration;
+ }
+
+ // Fill in the duration while accounting for repeated frames.
+ //
+ // TODO(ajwong): Make sure this formula is correct.
+ pts.duration = ConvertTimestamp(time_base, 1 + repeat_pict);
+
+ return pts;
+}
+
+void VideoDecoderImpl::SignalPipelineError() {
+ host()->SetError(PIPELINE_ERROR_DECODE);
+ state_ = kDecodeFinished;
+}
+
+void VideoDecoderImpl::SetVideoDecodeEngineForTest(
+ VideoDecodeEngine* engine) {
+ decode_engine_.reset(engine);
+}
+
+} // namespace media
diff --git a/media/filters/video_decoder_impl.h b/media/filters/video_decoder_impl.h
new file mode 100644
index 0000000..959b5b0
--- /dev/null
+++ b/media/filters/video_decoder_impl.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_VIDEO_DECODER_IMPL_H_
+#define MEDIA_FILTERS_VIDEO_DECODER_IMPL_H_
+
+#include "base/time.h"
+#include "media/base/pts_heap.h"
+#include "media/filters/decoder_base.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
+
+// FFmpeg types.
+struct AVFrame;
+struct AVRational;
+
+namespace media {
+
+class VideoDecodeEngine;
+
+class VideoDecoderImpl : public DecoderBase<VideoDecoder, VideoFrame> {
+ protected:
+ VideoDecoderImpl(VideoDecodeEngine* engine);
+
+ // Make this class abstract to document that this class cannot be used
+ // directly as a filter type because it does not implement the static methods
+ // CreateFactory() and IsMediaFormatSupported().
+ //
+ // TODO(ajwong): When we clean up the filters to not required a static
+ // implementation of CreateFactory() and IsMediaFormatSupported(), this
+ // class doesn't probably need to be abstract.
+ virtual ~VideoDecoderImpl() = 0;
+
+ private:
+ friend class FilterFactoryImpl1<VideoDecoderImpl, VideoDecodeEngine*>;
+ friend class DecoderPrivateMock;
+ friend class VideoDecoderImplTest;
+ FRIEND_TEST(VideoDecoderImplTest, FindPtsAndDuration);
+ FRIEND_TEST(VideoDecoderImplTest, DoDecode_EnqueueVideoFrameError);
+ FRIEND_TEST(VideoDecoderImplTest, DoDecode_FinishEnqueuesEmptyFrames);
+ FRIEND_TEST(VideoDecoderImplTest, DoDecode_TestStateTransition);
+ FRIEND_TEST(VideoDecoderImplTest, DoSeek);
+
+ // The TimeTuple struct is used to hold the timestamp data needed for
+ // enqueuing a video frame.
+ struct TimeTuple {
+ base::TimeDelta timestamp;
+ base::TimeDelta duration;
+ };
+
+ enum DecoderState {
+ kNormal,
+ kFlushCodec,
+ kDecodeFinished,
+ };
+
+ // Implement DecoderBase template methods.
+ virtual void DoInitialize(DemuxerStream* demuxer_stream, bool* success,
+ Task* done_cb);
+ virtual void DoSeek(base::TimeDelta time, Task* done_cb);
+ virtual void DoDecode(Buffer* buffer, Task* done_cb);
+
+ virtual bool EnqueueVideoFrame(VideoSurface::Format surface_format,
+ const TimeTuple& time,
+ const AVFrame* frame);
+
+ // Create an empty video frame and queue it.
+ virtual void EnqueueEmptyFrame();
+
+ virtual void CopyPlane(size_t plane, const VideoSurface& surface,
+ const AVFrame* frame);
+
+ // Methods that pickup after the decode engine has finished its action.
+ virtual void OnInitializeComplete(bool* success /* Not owned */,
+ Task* done_cb);
+ virtual void OnDecodeComplete(AVFrame* yuv_frame, bool* got_frame,
+ Task* done_cb);
+
+ // Attempt to get the PTS and Duration for this frame by examining the time
+ // info provided via packet stream (stored in |pts_heap|), or the info
+ // written into the AVFrame itself. If no data is available in either, then
+ // attempt to generate a best guess of the pts based on the last known pts.
+ //
+ // Data inside the AVFrame (if provided) is trusted the most, followed
+ // by data from the packet stream. Estimation based on the |last_pts| is
+ // reserved as a last-ditch effort.
+ virtual TimeTuple FindPtsAndDuration(const AVRational& time_base,
+ const PtsHeap& pts_heap,
+ const TimeTuple& last_pts,
+ const AVFrame* frame);
+
+ // Signals the pipeline that a decode error occurs, and moves the decoder
+ // into the kDecodeFinished state.
+ virtual void SignalPipelineError();
+
+ // Injection point for unittest to provide a mock engine. Takes ownership of
+ // the provided engine.
+ virtual void SetVideoDecodeEngineForTest(VideoDecodeEngine* engine);
+
+ size_t width_;
+ size_t height_;
+
+ PtsHeap pts_heap_; // Heap of presentation timestamps.
+ TimeTuple last_pts_;
+ scoped_ptr<AVRational> time_base_; // Pointer to avoid needing full type.
+ DecoderState state_;
+ scoped_ptr<VideoDecodeEngine> decode_engine_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoDecoderImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_VIDEO_DECODER_IMPL_H_
diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/video_decoder_impl_unittest.cc
index 7bb05a9..a9164b1 100644
--- a/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/media/filters/video_decoder_impl_unittest.cc
@@ -15,6 +15,8 @@
#include "media/filters/ffmpeg_common.h"
#include "media/filters/ffmpeg_interfaces.h"
#include "media/filters/ffmpeg_video_decoder.h"
+#include "media/filters/video_decode_engine.h"
+#include "media/filters/video_decoder_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
@@ -54,10 +56,10 @@ class MockVideoDecodeEngine : public VideoDecodeEngine {
};
// Class that just mocks the private functions.
-class DecoderPrivateMock : public FFmpegVideoDecoder {
+class DecoderPrivateMock : public VideoDecoderImpl {
public:
DecoderPrivateMock(VideoDecodeEngine* engine)
- : FFmpegVideoDecoder(engine) {
+ : VideoDecoderImpl(engine) {
}
MOCK_METHOD3(EnqueueVideoFrame, bool(VideoSurface::Format surface_format,
@@ -75,20 +77,23 @@ class DecoderPrivateMock : public FFmpegVideoDecoder {
// Fixture class to facilitate writing tests. Takes care of setting up the
// FFmpeg, pipeline and filter host mocks.
-class FFmpegVideoDecoderTest : public testing::Test {
+class VideoDecoderImplTest : public testing::Test {
protected:
static const int kWidth;
static const int kHeight;
- static const FFmpegVideoDecoder::TimeTuple kTestPts1;
- static const FFmpegVideoDecoder::TimeTuple kTestPts2;
+ static const VideoDecoderImpl::TimeTuple kTestPts1;
+ static const VideoDecoderImpl::TimeTuple kTestPts2;
- FFmpegVideoDecoderTest() {
+ VideoDecoderImplTest() {
MediaFormat media_format;
media_format.SetAsString(MediaFormat::kMimeType, mime_type::kFFmpegVideo);
- // Create an FFmpegVideoDecoder, and MockVideoDecodeEngine.
+ // Create a VideoDecoderImpl, and MockVideoDecodeEngine. We use an
+ // FFmpegVideoDecoder to instantiate a full VideoDecoderImpl with an engine.
+ //
+ // TODO(ajwong): Break the test's dependency on FFmpegVideoDecoder.
factory_ = FFmpegVideoDecoder::CreateFactory();
- decoder_ = factory_->Create<FFmpegVideoDecoder>(media_format);
+ decoder_ = factory_->Create<VideoDecoderImpl>(media_format);
engine_ = new StrictMock<MockVideoDecodeEngine>();
DCHECK(decoder_);
@@ -115,7 +120,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
MockFFmpeg::set(&mock_ffmpeg_);
}
- virtual ~FFmpegVideoDecoderTest() {
+ virtual ~VideoDecoderImplTest() {
// Call Stop() to shut down internal threads.
decoder_->Stop();
@@ -129,7 +134,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
// Fixture members.
scoped_refptr<FilterFactory> factory_;
MockVideoDecodeEngine* engine_; // Owned by |decoder_|.
- scoped_refptr<FFmpegVideoDecoder> decoder_;
+ scoped_refptr<VideoDecoderImpl> decoder_;
scoped_refptr<StrictMock<MockFFmpegDemuxerStream> > demuxer_;
scoped_refptr<DataBuffer> buffer_;
scoped_refptr<DataBuffer> end_of_stream_buffer_;
@@ -145,19 +150,19 @@ class FFmpegVideoDecoderTest : public testing::Test {
StrictMock<MockFFmpeg> mock_ffmpeg_;
private:
- DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecoderTest);
+ DISALLOW_COPY_AND_ASSIGN(VideoDecoderImplTest);
};
-const int FFmpegVideoDecoderTest::kWidth = 1280;
-const int FFmpegVideoDecoderTest::kHeight = 720;
-const FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoderTest::kTestPts1 =
+const int VideoDecoderImplTest::kWidth = 1280;
+const int VideoDecoderImplTest::kHeight = 720;
+const VideoDecoderImpl::TimeTuple VideoDecoderImplTest::kTestPts1 =
{ base::TimeDelta::FromMicroseconds(123),
base::TimeDelta::FromMicroseconds(50) };
-const FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoderTest::kTestPts2 =
+const VideoDecoderImpl::TimeTuple VideoDecoderImplTest::kTestPts2 =
{ base::TimeDelta::FromMicroseconds(456),
base::TimeDelta::FromMicroseconds(60) };
-TEST(FFmpegVideoDecoderFactoryTest, Create) {
+TEST(VideoDecoderImplFactoryTest, Create) {
// Should only accept video/x-ffmpeg mime type.
scoped_refptr<FilterFactory> factory = FFmpegVideoDecoder::CreateFactory();
MediaFormat media_format;
@@ -174,7 +179,7 @@ TEST(FFmpegVideoDecoderFactoryTest, Create) {
ASSERT_TRUE(decoder);
}
-TEST_F(FFmpegVideoDecoderTest, Initialize_QueryInterfaceFails) {
+TEST_F(VideoDecoderImplTest, Initialize_QueryInterfaceFails) {
// Test QueryInterface returning NULL.
EXPECT_CALL(*demuxer_, QueryInterface(AVStreamProvider::interface_id()))
.WillOnce(ReturnNull());
@@ -186,7 +191,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_QueryInterfaceFails) {
message_loop_.RunAllPending();
}
-TEST_F(FFmpegVideoDecoderTest, Initialize_EngineFails) {
+TEST_F(VideoDecoderImplTest, Initialize_EngineFails) {
// Test successful initialization.
AVStreamProvider* av_stream_provider = demuxer_;
EXPECT_CALL(*demuxer_, QueryInterface(AVStreamProvider::interface_id()))
@@ -208,7 +213,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_EngineFails) {
message_loop_.RunAllPending();
}
-TEST_F(FFmpegVideoDecoderTest, Initialize_Successful) {
+TEST_F(VideoDecoderImplTest, Initialize_Successful) {
// Test successful initialization.
AVStreamProvider* av_stream_provider = demuxer_;
EXPECT_CALL(*demuxer_, QueryInterface(AVStreamProvider::interface_id()))
@@ -241,7 +246,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_Successful) {
EXPECT_EQ(kHeight, height);
}
-TEST_F(FFmpegVideoDecoderTest, FindPtsAndDuration) {
+TEST_F(VideoDecoderImplTest, FindPtsAndDuration) {
// Start with an empty timestamp queue.
PtsHeap pts_heap;
@@ -251,13 +256,13 @@ TEST_F(FFmpegVideoDecoderTest, FindPtsAndDuration) {
// Setup the last known pts to be at 100 microseconds with a have 16
// duration.
- FFmpegVideoDecoder::TimeTuple last_pts;
+ VideoDecoderImpl::TimeTuple last_pts;
last_pts.timestamp = base::TimeDelta::FromMicroseconds(100);
last_pts.duration = base::TimeDelta::FromMicroseconds(16);
// Simulate an uninitialized yuv_frame.
yuv_frame_.pts = AV_NOPTS_VALUE;
- FFmpegVideoDecoder::TimeTuple result_pts =
+ VideoDecoderImpl::TimeTuple result_pts =
decoder_->FindPtsAndDuration(time_base, pts_heap, last_pts, &yuv_frame_);
EXPECT_EQ(116, result_pts.timestamp.InMicroseconds());
EXPECT_EQ(500000, result_pts.duration.InMicroseconds());
@@ -300,7 +305,7 @@ TEST_F(FFmpegVideoDecoderTest, FindPtsAndDuration) {
EXPECT_EQ(1500000, result_pts.duration.InMicroseconds());
}
-TEST_F(FFmpegVideoDecoderTest, DoDecode_TestStateTransition) {
+TEST_F(VideoDecoderImplTest, DoDecode_TestStateTransition) {
// Simulates a input sequence of three buffers, and six decode requests to
// exercise the state transitions, and bookkeeping logic of DoDecode.
//
@@ -359,20 +364,20 @@ TEST_F(FFmpegVideoDecoderTest, DoDecode_TestStateTransition) {
EXPECT_CALL(done_cb, Run()).Times(6);
// Setup initial state and check that it is sane.
- ASSERT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_);
+ ASSERT_EQ(VideoDecoderImpl::kNormal, mock_decoder->state_);
ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.timestamp);
ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.duration);
// Decode once, which should simulate a buffering call.
mock_decoder->DoDecode(buffer_, done_cb.CreateTask());
- EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_);
+ EXPECT_EQ(VideoDecoderImpl::kNormal, mock_decoder->state_);
ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.timestamp);
ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.duration);
EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty());
// Decode a second time, which should yield the first frame.
mock_decoder->DoDecode(buffer_, done_cb.CreateTask());
- EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_);
+ EXPECT_EQ(VideoDecoderImpl::kNormal, mock_decoder->state_);
EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp);
EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration);
EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty());
@@ -380,7 +385,7 @@ TEST_F(FFmpegVideoDecoderTest, DoDecode_TestStateTransition) {
// Decode a third time, with a regular buffer. The decode will error
// out, but the state should be the same.
mock_decoder->DoDecode(buffer_, done_cb.CreateTask());
- EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_);
+ EXPECT_EQ(VideoDecoderImpl::kNormal, mock_decoder->state_);
EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp);
EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration);
EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty());
@@ -388,7 +393,7 @@ TEST_F(FFmpegVideoDecoderTest, DoDecode_TestStateTransition) {
// Decode a fourth time, with an end of stream buffer. This should
// yield the second frame, and stay in flushing mode.
mock_decoder->DoDecode(end_of_stream_buffer_, done_cb.CreateTask());
- EXPECT_EQ(FFmpegVideoDecoder::kFlushCodec, mock_decoder->state_);
+ EXPECT_EQ(VideoDecoderImpl::kFlushCodec, mock_decoder->state_);
EXPECT_TRUE(kTestPts2.timestamp == mock_decoder->last_pts_.timestamp);
EXPECT_TRUE(kTestPts2.duration == mock_decoder->last_pts_.duration);
EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty());
@@ -396,7 +401,7 @@ TEST_F(FFmpegVideoDecoderTest, DoDecode_TestStateTransition) {
// Decode a fifth time with an end of stream buffer. this should
// yield the third frame.
mock_decoder->DoDecode(end_of_stream_buffer_, done_cb.CreateTask());
- EXPECT_EQ(FFmpegVideoDecoder::kFlushCodec, mock_decoder->state_);
+ EXPECT_EQ(VideoDecoderImpl::kFlushCodec, mock_decoder->state_);
EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp);
EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration);
EXPECT_TRUE(mock_decoder->pts_heap_.IsEmpty());
@@ -404,13 +409,13 @@ TEST_F(FFmpegVideoDecoderTest, DoDecode_TestStateTransition) {
// Decode a sixth time with an end of stream buffer. This should
// Move into kDecodeFinished.
mock_decoder->DoDecode(end_of_stream_buffer_, done_cb.CreateTask());
- EXPECT_EQ(FFmpegVideoDecoder::kDecodeFinished, mock_decoder->state_);
+ EXPECT_EQ(VideoDecoderImpl::kDecodeFinished, mock_decoder->state_);
EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp);
EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration);
EXPECT_TRUE(mock_decoder->pts_heap_.IsEmpty());
}
-TEST_F(FFmpegVideoDecoderTest, DoDecode_EnqueueVideoFrameError) {
+TEST_F(VideoDecoderImplTest, DoDecode_EnqueueVideoFrameError) {
MockVideoDecodeEngine* mock_engine = new StrictMock<MockVideoDecodeEngine>();
scoped_refptr<DecoderPrivateMock> mock_decoder =
new StrictMock<DecoderPrivateMock>(mock_engine);
@@ -441,13 +446,13 @@ TEST_F(FFmpegVideoDecoderTest, DoDecode_EnqueueVideoFrameError) {
mock_decoder->DoDecode(buffer_, done_cb.CreateTask());
}
-TEST_F(FFmpegVideoDecoderTest, DoDecode_FinishEnqueuesEmptyFrames) {
+TEST_F(VideoDecoderImplTest, DoDecode_FinishEnqueuesEmptyFrames) {
MockVideoDecodeEngine* mock_engine = new StrictMock<MockVideoDecodeEngine>();
scoped_refptr<DecoderPrivateMock> mock_decoder =
new StrictMock<DecoderPrivateMock>(mock_engine);
// Move the decoder into the finished state for this test.
- mock_decoder->state_ = FFmpegVideoDecoder::kDecodeFinished;
+ mock_decoder->state_ = VideoDecoderImpl::kDecodeFinished;
// Expect 2 calls, make two calls. If kDecodeFinished is set, the buffer is
// not even examined.
@@ -461,18 +466,18 @@ TEST_F(FFmpegVideoDecoderTest, DoDecode_FinishEnqueuesEmptyFrames) {
mock_decoder->DoDecode(buffer_, done_cb.CreateTask());
mock_decoder->DoDecode(end_of_stream_buffer_, done_cb.CreateTask());
- EXPECT_EQ(FFmpegVideoDecoder::kDecodeFinished, mock_decoder->state_);
+ EXPECT_EQ(VideoDecoderImpl::kDecodeFinished, mock_decoder->state_);
}
-TEST_F(FFmpegVideoDecoderTest, DoSeek) {
+TEST_F(VideoDecoderImplTest, DoSeek) {
// Simulates receiving a call to DoSeek() while in every possible state. In
// every case, it should clear the timestamp queue, flush the decoder and
// reset the state to kNormal.
const base::TimeDelta kZero;
- const FFmpegVideoDecoder::DecoderState kStates[] = {
- FFmpegVideoDecoder::kNormal,
- FFmpegVideoDecoder::kFlushCodec,
- FFmpegVideoDecoder::kDecodeFinished,
+ const VideoDecoderImpl::DecoderState kStates[] = {
+ VideoDecoderImpl::kNormal,
+ VideoDecoderImpl::kFlushCodec,
+ VideoDecoderImpl::kDecodeFinished,
};
for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kStates); ++i) {
@@ -499,7 +504,7 @@ TEST_F(FFmpegVideoDecoderTest, DoSeek) {
// Seek and verify the results.
mock_decoder->DoSeek(kZero, done_cb.CreateTask());
EXPECT_TRUE(mock_decoder->pts_heap_.IsEmpty());
- EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_);
+ EXPECT_EQ(VideoDecoderImpl::kNormal, mock_decoder->state_);
}
}
diff --git a/media/media.gyp b/media/media.gyp
index f53c35d..e504d12 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -19,6 +19,7 @@
'target_name': 'media',
'type': '<(library)',
'dependencies': [
+ 'omx_wrapper',
'../base/base.gyp:base',
'../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
],
@@ -98,6 +99,8 @@
'filters/audio_renderer_base.h',
'filters/audio_renderer_impl.cc',
'filters/audio_renderer_impl.h',
+ 'filters/bitstream_converter.cc',
+ 'filters/bitstream_converter.h',
'filters/decoder_base.h',
'filters/ffmpeg_audio_decoder.cc',
'filters/ffmpeg_audio_decoder.h',
@@ -109,12 +112,20 @@
'filters/ffmpeg_glue.h',
'filters/ffmpeg_interfaces.cc',
'filters/ffmpeg_interfaces.h',
+ 'filters/ffmpeg_video_decode_engine.cc',
+ 'filters/ffmpeg_video_decode_engine.h',
'filters/ffmpeg_video_decoder.cc',
'filters/ffmpeg_video_decoder.h',
'filters/file_data_source.cc',
'filters/file_data_source.h',
'filters/null_audio_renderer.cc',
'filters/null_audio_renderer.h',
+ 'filters/omx_video_decode_engine.cc',
+ 'filters/omx_video_decode_engine.h',
+ 'filters/omx_video_decoder.cc',
+ 'filters/omx_video_decoder.h',
+ 'filters/video_decoder_impl.cc',
+ 'filters/video_decoder_impl.h',
'filters/video_decode_engine.h',
'filters/video_renderer_base.cc',
'filters/video_renderer_base.h',
@@ -195,11 +206,12 @@
'base/yuv_convert_unittest.cc',
'filters/audio_renderer_algorithm_ola_unittest.cc',
'filters/audio_renderer_base_unittest.cc',
+ 'filters/bitstream_converter_unittest.cc',
'filters/ffmpeg_demuxer_unittest.cc',
'filters/ffmpeg_glue_unittest.cc',
'filters/ffmpeg_video_decode_engine_unittest.cc',
- 'filters/ffmpeg_video_decoder_unittest.cc',
'filters/file_data_source_unittest.cc',
+ 'filters/video_decoder_impl_unittest.cc',
'filters/video_renderer_base_unittest.cc',
],
'conditions': [
@@ -257,9 +269,8 @@
'target_name': 'omx_test',
'type': 'executable',
'dependencies': [
- 'omx',
+ 'omx_wrapper',
'../base/base.gyp:base',
- '../third_party/openmax/openmax.gyp:il',
],
'sources': [
'omx/omx_test.cc',
@@ -269,7 +280,7 @@
'target_name': 'omx_unittests',
'type': 'executable',
'dependencies': [
- 'omx',
+ 'omx_wrapper',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../testing/gtest.gyp:gtest',
@@ -280,7 +291,7 @@
],
},
{
- 'target_name': 'omx',
+ 'target_name': 'omx_wrapper',
'type': '<(library)',
'dependencies': [
'../base/base.gyp:base',
@@ -292,6 +303,9 @@
'omx/omx_codec.cc',
'omx/omx_codec.h',
],
+ 'export_dependent_settings': [
+ '../third_party/openmax/openmax.gyp:il',
+ ],
},
],
'conditions': [
diff --git a/media/omx/omx_codec.cc b/media/omx/omx_codec.cc
index 6c42b3c..d67798d56 100644
--- a/media/omx/omx_codec.cc
+++ b/media/omx/omx_codec.cc
@@ -81,7 +81,8 @@ void OmxCodec::Feed(InputBuffer* buffer, FeedCallback* callback) {
}
void OmxCodec::Flush(Callback* callback) {
- // TODO(hclam): implement.
+ callback->Run();
+ delete callback;
}
OmxCodec::State OmxCodec::GetState() const {
@@ -349,8 +350,9 @@ void OmxCodec::Transition_EmptyToLoaded() {
else
LOG(ERROR) << "Error: Unsupported codec " << codec_;
// Assume QCIF.
- port_format.format.video.nFrameWidth = 176;
- port_format.format.video.nFrameHeight = 144;
+ // TODO(ajwong): This MUST come from the client library somehow.
+ port_format.format.video.nFrameWidth = 720;
+ port_format.format.video.nFrameHeight = 480;
omxresult = OMX_SetParameter(decoder_handle_,
OMX_IndexParamPortDefinition,
&port_format);
diff --git a/media/tools/player_x11/player_x11.cc b/media/tools/player_x11/player_x11.cc
index ac8bbdd..a734574 100644
--- a/media/tools/player_x11/player_x11.cc
+++ b/media/tools/player_x11/player_x11.cc
@@ -19,6 +19,7 @@
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/file_data_source.h"
#include "media/filters/null_audio_renderer.h"
+#include "media/filters/omx_video_decoder.h"
#include "media/tools/player_x11/x11_video_renderer.h"
Display* g_display = NULL;
@@ -64,6 +65,9 @@ bool InitPipeline(MessageLoop* message_loop,
factories->AddFactory(media::FileDataSource::CreateFactory());
factories->AddFactory(media::FFmpegAudioDecoder::CreateFactory());
factories->AddFactory(media::FFmpegDemuxer::CreateFilterFactory());
+ if (CommandLine::ForCurrentProcess()->HasSwitch("use-omx")) {
+ factories->AddFactory(media::OmxVideoDecoder::CreateFactory());
+ }
factories->AddFactory(media::FFmpegVideoDecoder::CreateFactory());
factories->AddFactory(X11VideoRenderer::CreateFactory(g_display, g_window));
@@ -153,11 +157,11 @@ int main(int argc, char** argv) {
usleep(10000);
}
}
+ pipeline->Stop(NULL);
+ } else{
+ std::cout << "Pipeline initialization failed..." << std::endl;
}
- std::cout << "Stopping..." << std::endl;
- pipeline->Stop(NULL);
-
// Cleanup tasks.
thread->Stop();
XDestroyWindow(g_display, g_window);