author     scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2009-03-10 18:06:25 +0000
committer  scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2009-03-10 18:06:25 +0000
commit     35751ccc3151bdca7383a3368d26834511125f24 (patch)
tree       c5123b55aec70f35cea48a7a77e60d2a022d3f3b /media
parent     048539b80280b675fb545ad846e3b912da1c7a0f (diff)
Checking in media::FFmpegDemuxer and tests.
FFmpegDemuxer is a Demuxer implementation using FFmpeg's libavformat. It is written to work with any format and assumes that the downstream decoders can interpret FFmpeg's CodecID enumerations.

Review URL: http://codereview.chromium.org/39295

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@11346 0039d316-1c4b-4281-b951-d872f2087c98
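For context on how a downstream decoder is expected to consume these streams, here is a minimal sketch (not part of this change; the LookUpCodecID helper and the exact includes are illustrative) of pulling the CodecID back out of a stream's MediaFormat via the kFFmpegCodecID key that FFmpegDemuxerStream sets:

// Sketch only: recover the FFmpeg CodecID that FFmpegDemuxerStream stored in
// its MediaFormat, so a decoder can pick the matching FFmpeg codec.
#include "media/base/filters.h"           // media::DemuxerStream (assumed)
#include "media/base/media_format.h"      // media::MediaFormat
#include "media/filters/ffmpeg_common.h"  // media::kFFmpegCodecID, CodecID

bool LookUpCodecID(media::DemuxerStream* stream, CodecID* codec_id_out) {
  const media::MediaFormat* format = stream->GetMediaFormat();
  int codec_id = 0;
  if (!format->GetAsInteger(media::kFFmpegCodecID, &codec_id))
    return false;  // Not a stream produced by FFmpegDemuxer.
  *codec_id_out = static_cast<CodecID>(codec_id);
  return true;
}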
Diffstat (limited to 'media')
-rw-r--r--  media/base/pipeline.h                       7
-rw-r--r--  media/build/media.vcproj                    8
-rw-r--r--  media/build/media_unittests.vcproj          4
-rw-r--r--  media/filters/ffmpeg_demuxer.cc           281
-rw-r--r--  media/filters/ffmpeg_demuxer.h            127
-rw-r--r--  media/filters/ffmpeg_demuxer_unittest.cc  360
6 files changed, 786 insertions, 1 deletion
diff --git a/media/base/pipeline.h b/media/base/pipeline.h
index da3dfc4..58883cc 100644
--- a/media/base/pipeline.h
+++ b/media/base/pipeline.h
@@ -31,7 +31,12 @@ enum PipelineError {
PIPELINE_ERROR_REQUIRED_FILTER_MISSING,
PIPELINE_ERROR_OUT_OF_MEMORY,
PIPELINE_ERROR_COULD_NOT_RENDER,
- PIPELINE_ERROR_READ
+ PIPELINE_ERROR_READ,
+
+ // Demuxer related errors.
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_PARSE,
+ DEMUXER_ERROR_NO_SUPPORTED_STREAMS,
};
// Base class for Pipeline class which allows for read-only access to members.
diff --git a/media/build/media.vcproj b/media/build/media.vcproj
index d0a78f6..37b7977 100644
--- a/media/build/media.vcproj
+++ b/media/build/media.vcproj
@@ -221,6 +221,14 @@
>
</File>
<File
+ RelativePath="..\filters\ffmpeg_demuxer.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\filters\ffmpeg_demuxer.h"
+ >
+ </File>
+ <File
RelativePath="..\filters\ffmpeg_glue.cc"
>
</File>
diff --git a/media/build/media_unittests.vcproj b/media/build/media_unittests.vcproj
index 214bab6d3..4e18337 100644
--- a/media/build/media_unittests.vcproj
+++ b/media/build/media_unittests.vcproj
@@ -192,6 +192,10 @@
Name="filters"
>
<File
+ RelativePath="..\filters\ffmpeg_demuxer_unittest.cc"
+ >
+ </File>
+ <File
RelativePath="..\filters\ffmpeg_glue_unittest.cc"
>
</File>
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
new file mode 100644
index 0000000..4344147
--- /dev/null
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -0,0 +1,281 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/string_util.h"
+#include "base/time.h"
+#include "media/base/filter_host.h"
+#include "media/filters/ffmpeg_common.h"
+#include "media/filters/ffmpeg_demuxer.h"
+#include "media/filters/ffmpeg_glue.h"
+
+namespace media {
+
+//
+// AVPacketBuffer
+//
+class AVPacketBuffer : public Buffer {
+ public:
+ AVPacketBuffer(AVPacket* packet, const base::TimeDelta& timestamp,
+ const base::TimeDelta& duration)
+ : packet_(packet) {
+ DCHECK(packet);
+ SetTimestamp(timestamp);
+ SetDuration(duration);
+ }
+
+ virtual ~AVPacketBuffer() {
+ av_free_packet(packet_.get());
+ }
+
+ // Buffer implementation.
+ virtual const char* GetData() const {
+ return reinterpret_cast<const char*>(packet_->data);
+ }
+
+ virtual size_t GetDataSize() const {
+ return static_cast<size_t>(packet_->size);
+ }
+
+ private:
+ scoped_ptr<AVPacket> packet_;
+
+ DISALLOW_COPY_AND_ASSIGN(AVPacketBuffer);
+};
+
+
+//
+// FFmpegDemuxerStream
+//
+FFmpegDemuxerStream::FFmpegDemuxerStream(FFmpegDemuxer* demuxer,
+ const AVStream& stream)
+ : demuxer_(demuxer) {
+ DCHECK(demuxer_);
+
+ // Determine our media format.
+ switch (stream.codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ media_format_.SetAsString(MediaFormat::kMimeType,
+ mime_type::kFFmpegAudio);
+ media_format_.SetAsInteger(MediaFormat::kChannels,
+ stream.codec->channels);
+ media_format_.SetAsInteger(MediaFormat::kSampleRate,
+ stream.codec->sample_rate);
+ break;
+ case CODEC_TYPE_VIDEO:
+ media_format_.SetAsString(MediaFormat::kMimeType,
+ mime_type::kFFmpegVideo);
+ media_format_.SetAsInteger(MediaFormat::kHeight,
+ stream.codec->height);
+ media_format_.SetAsInteger(MediaFormat::kWidth,
+ stream.codec->width);
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ int codec_id = static_cast<int>(stream.codec->codec_id);
+ media_format_.SetAsInteger(kFFmpegCodecID, codec_id);
+
+ // Calculate the time base and duration in microseconds.
+ int64 time_base_us = static_cast<int64>(av_q2d(stream.time_base) *
+ base::Time::kMicrosecondsPerSecond);
+ int64 duration_us = static_cast<int64>(time_base_us * stream.duration);
+ time_base_ = base::TimeDelta::FromMicroseconds(time_base_us);
+ duration_ = base::TimeDelta::FromMicroseconds(duration_us);
+}
+
+FFmpegDemuxerStream::~FFmpegDemuxerStream() {
+ // Since |input_queue_| and |output_queue_| use scoped_refptr everything
+ // should get released.
+}
+
+bool FFmpegDemuxerStream::HasPendingReads() {
+ AutoLock auto_lock(lock_);
+ return !output_queue_.empty();
+}
+
+void FFmpegDemuxerStream::EnqueuePacket(AVPacket* packet) {
+ base::TimeDelta timestamp = time_base_ * packet->pts;
+ base::TimeDelta duration = time_base_ * packet->duration;
+ Buffer* buffer = new AVPacketBuffer(packet, timestamp, duration);
+ DCHECK(buffer);
+ {
+ AutoLock auto_lock(lock_);
+ input_queue_.push_back(buffer);
+ }
+ FulfillPendingReads();
+}
+
+const MediaFormat* FFmpegDemuxerStream::GetMediaFormat() {
+ return &media_format_;
+}
+
+void FFmpegDemuxerStream::Read(Assignable<Buffer>* buffer) {
+ DCHECK(buffer);
+ {
+ AutoLock auto_lock(lock_);
+ output_queue_.push_back(scoped_refptr< Assignable<Buffer> >(buffer));
+ }
+ if (FulfillPendingReads()) {
+ demuxer_->ScheduleDemux();
+ }
+}
+
+bool FFmpegDemuxerStream::FulfillPendingReads() {
+ bool pending_reads = false;
+ while (true) {
+ scoped_refptr<Buffer> buffer_in;
+ scoped_refptr< Assignable<Buffer> > buffer_out;
+ {
+ AutoLock auto_lock(lock_);
+ pending_reads = !output_queue_.empty();
+ if (input_queue_.empty() || output_queue_.empty()) {
+ break;
+ }
+ buffer_in = input_queue_.front();
+ buffer_out = output_queue_.front();
+ input_queue_.pop_front();
+ output_queue_.pop_front();
+ }
+ buffer_out->SetBuffer(buffer_in);
+ buffer_out->OnAssignment();
+ }
+ return pending_reads;
+}
+
+
+//
+// FFmpegDemuxer
+//
+FFmpegDemuxer::FFmpegDemuxer()
+ : demuxing_(false),
+ format_context_(NULL) {
+}
+
+FFmpegDemuxer::~FFmpegDemuxer() {
+ if (format_context_) {
+ av_free(format_context_);
+ }
+ while (!streams_.empty()) {
+ delete streams_.back();
+ streams_.pop_back();
+ }
+}
+
+void FFmpegDemuxer::ScheduleDemux() {
+ if (!demuxing_) {
+ demuxing_ = true;
+ host_->PostTask(NewRunnableMethod(this, &FFmpegDemuxer::Demux));
+ }
+}
+
+void FFmpegDemuxer::Stop() {
+ // TODO(scherkus): implement Stop().
+ NOTIMPLEMENTED();
+}
+
+bool FFmpegDemuxer::Initialize(DataSource* data_source) {
+ // In order to get FFmpeg to use |data_source| for file IO we must transfer
+ // ownership via FFmpegGlue. We'll add |data_source| to FFmpegGlue and pass
+ // the resulting key to FFmpeg. FFmpeg will pass the key to FFmpegGlue which
+ // will take care of attaching |data_source| to an FFmpeg context. After
+ // we finish initializing the FFmpeg context we can remove |data_source| from
+ // FFmpegGlue.
+ //
+ // Refer to media/filters/ffmpeg_glue.h for details.
+
+ // Add our data source and get our unique key.
+ std::string key = FFmpegGlue::get()->AddDataSource(data_source);
+
+ // Open FFmpeg AVFormatContext.
+ DCHECK(!format_context_);
+ int result = av_open_input_file(&format_context_, key.c_str(), NULL, 0, NULL);
+
+ // Remove our data source.
+ FFmpegGlue::get()->RemoveDataSource(data_source);
+
+ if (result < 0) {
+ host_->Error(DEMUXER_ERROR_COULD_NOT_OPEN);
+ return false;
+ }
+
+ // Fully initialize AVFormatContext by parsing the stream a little.
+ result = av_find_stream_info(format_context_);
+ if (result < 0) {
+ host_->Error(DEMUXER_ERROR_COULD_NOT_PARSE);
+ return false;
+ }
+
+ // Create demuxer streams for all supported streams.
+ base::TimeDelta max_duration;
+ for (size_t i = 0; i < format_context_->nb_streams; ++i) {
+ CodecType codec_type = format_context_->streams[i]->codec->codec_type;
+ if (codec_type == CODEC_TYPE_AUDIO || codec_type == CODEC_TYPE_VIDEO) {
+ AVStream* stream = format_context_->streams[i];
+ FFmpegDemuxerStream* demuxer_stream
+ = new FFmpegDemuxerStream(this, *stream);
+ DCHECK(demuxer_stream);
+ streams_.push_back(demuxer_stream);
+ max_duration = std::max(max_duration, demuxer_stream->duration());
+ }
+ }
+ if (streams_.empty()) {
+ host_->Error(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
+ return false;
+ }
+
+ // We have at least one supported stream, set the duration and notify we're
+ // done initializing.
+ host_->SetDuration(max_duration);
+ host_->InitializationComplete();
+ return true;
+}
+
+size_t FFmpegDemuxer::GetNumberOfStreams() {
+ return streams_.size();
+}
+
+DemuxerStream* FFmpegDemuxer::GetStream(int stream) {
+ DCHECK(stream >= 0);
+ DCHECK(stream < static_cast<int>(streams_.size()));
+ return streams_[stream];
+}
+
+void FFmpegDemuxer::Demux() {
+ DCHECK(demuxing_);
+
+ // Loop until we've satisfied every stream.
+ while (StreamsHavePendingReads()) {
+ // Allocate and read an AVPacket from the media.
+ scoped_ptr<AVPacket> packet(new AVPacket());
+ int result = av_read_frame(format_context_, packet.get());
+ if (result < 0) {
+ // TODO(scherkus): handle end of stream by marking Buffer with the end of
+ // stream flag.
+ NOTIMPLEMENTED();
+ break;
+ }
+
+ // Queue the packet with the appropriate stream.
+ DCHECK(packet->stream_index >= 0);
+ DCHECK(packet->stream_index < static_cast<int>(streams_.size()));
+ FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index];
+ demuxer_stream->EnqueuePacket(packet.release());
+ }
+
+ // Finished demuxing.
+ demuxing_ = false;
+}
+
+bool FFmpegDemuxer::StreamsHavePendingReads() {
+ StreamVector::iterator iter;
+ for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
+ if ((*iter)->HasPendingReads()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace media
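As a quick worked example of the timestamp math in FFmpegDemuxerStream above (the stream's time base is converted to microseconds once, then multiplied by each packet's pts), here is a standalone sketch assuming a made-up 1/44100 time base and pts:

// Standalone sketch of the pts -> microseconds conversion done above; the
// 1/44100 time base and the pts value are invented for illustration.
#include <stdint.h>
#include <stdio.h>

int main() {
  const int64_t kMicrosecondsPerSecond = 1000000;
  // Mirrors: time_base_us = av_q2d(stream.time_base) * kMicrosecondsPerSecond
  // with stream.time_base = {1, 44100}, i.e. av_q2d() ~= 0.0000226757.
  const double time_base_seconds = 1.0 / 44100.0;
  const int64_t time_base_us =
      static_cast<int64_t>(time_base_seconds * kMicrosecondsPerSecond);  // 22
  // Mirrors: timestamp = time_base_ * packet->pts.
  const int64_t pts = 4410;
  printf("timestamp = %lld us\n",
         static_cast<long long>(time_base_us * pts));  // 97020 us
  return 0;
}

Note that truncating the time base to whole microseconds before multiplying loses a little precision per packet: 4410 ticks of a 1/44100 time base is exactly 100000 us, while the sketch (and the code above) yields 97020 us.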
diff --git a/media/filters/ffmpeg_demuxer.h b/media/filters/ffmpeg_demuxer.h
new file mode 100644
index 0000000..895dd65
--- /dev/null
+++ b/media/filters/ffmpeg_demuxer.h
@@ -0,0 +1,127 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implements the Demuxer interface using FFmpeg's libavformat. At this time
+// it will support demuxing any audio/video format thrown at it. The streams
+// output mime types audio/x-ffmpeg and video/x-ffmpeg and include an integer
+// key FFmpegCodecID, which contains the CodecID enumeration value. The
+// CodecIDs can be used to create and initialize the corresponding FFmpeg
+// decoder.
+//
+// FFmpegDemuxer sets the duration of the pipeline during initialization,
+// using the duration of the longest audio/video stream.
+//
+// NOTE: since FFmpegDemuxer reads packets sequentially without seeking, media
+// files with very large drift between audio/video streams may result in
+// excessive memory consumption.
+
+#ifndef MEDIA_FILTERS_FFMPEG_DEMUXER_H_
+#define MEDIA_FILTERS_FFMPEG_DEMUXER_H_
+
+#include <deque>
+#include <vector>
+
+#include "base/lock.h"
+#include "media/base/buffers.h"
+#include "media/base/factory.h"
+#include "media/base/filters.h"
+#include "media/base/media_format.h"
+
+// FFmpeg forward declarations.
+struct AVCodecContext;
+struct AVBitStreamFilterContext;
+struct AVFormatContext;
+struct AVPacket;
+struct AVStream;
+enum CodecID;
+
+namespace media {
+
+class FFmpegDemuxer;
+
+class FFmpegDemuxerStream : public DemuxerStream {
+ public:
+ // Maintains a reference to |demuxer| and initializes itself using information
+ // inside |stream|.
+ FFmpegDemuxerStream(FFmpegDemuxer* demuxer, const AVStream& stream);
+
+ virtual ~FFmpegDemuxerStream();
+
+ // Returns true if this stream has pending reads, false otherwise.
+ bool HasPendingReads();
+
+ // Enqueues and takes ownership of the given AVPacket.
+ void EnqueuePacket(AVPacket* packet);
+
+ // Returns the duration of this stream.
+ base::TimeDelta duration() { return duration_; }
+
+ // DemuxerStream implementation.
+ virtual const MediaFormat* GetMediaFormat();
+ virtual void Read(Assignable<Buffer>* buffer);
+
+ private:
+ // Returns true if there are still pending reads.
+ bool FulfillPendingReads();
+
+ FFmpegDemuxer* demuxer_;
+ MediaFormat media_format_;
+ base::TimeDelta time_base_;
+ base::TimeDelta duration_;
+ Lock lock_;
+
+ typedef std::deque< scoped_refptr<Buffer> > InputQueue;
+ InputQueue input_queue_;
+
+ typedef std::deque< scoped_refptr< Assignable<Buffer> > > OutputQueue;
+ OutputQueue output_queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(FFmpegDemuxerStream);
+};
+
+class FFmpegDemuxer : public Demuxer {
+ public:
+ // FilterFactory provider.
+ static FilterFactory* CreateFilterFactory() {
+ return new FilterFactoryImpl0<FFmpegDemuxer>();
+ }
+
+ // Called by FFmpegDemuxerStreams to schedule a Demux() task.
+ void ScheduleDemux();
+
+ // MediaFilter implementation.
+ virtual void Stop();
+
+ // Demuxer implementation.
+ virtual bool Initialize(DataSource* data_source);
+ virtual size_t GetNumberOfStreams();
+ virtual DemuxerStream* GetStream(int stream_id);
+
+ private:
+ // Only allow a factory to create this class.
+ friend class FilterFactoryImpl0<FFmpegDemuxer>;
+ FFmpegDemuxer();
+ virtual ~FFmpegDemuxer();
+
+ // Demuxing task scheduled by streams.
+ void Demux();
+
+ // Returns true if any of the streams have pending reads.
+ bool StreamsHavePendingReads();
+
+ // Flag to prevent multiple Demux() tasks from being scheduled.
+ bool demuxing_;
+
+ // FFmpeg context handle.
+ AVFormatContext* format_context_;
+
+ // Vector of streams.
+ typedef std::vector<FFmpegDemuxerStream*> StreamVector;
+ StreamVector streams_;
+
+ DISALLOW_COPY_AND_ASSIGN(FFmpegDemuxer);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_FFMPEG_DEMUXER_H_
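For reference, construction of the demuxer goes through the FilterFactory declared above. A minimal sketch, mirroring what the unit tests below do (the CreateFFmpegDemuxer helper and the exact includes are illustrative assumptions):

// Sketch only: build an FFmpegDemuxer via its factory, as the pipeline would.
#include "base/ref_counted.h"            // scoped_refptr (assumed)
#include "media/base/factory.h"
#include "media/base/media_format.h"
#include "media/filters/ffmpeg_demuxer.h"

scoped_refptr<media::Demuxer> CreateFFmpegDemuxer() {
  scoped_refptr<media::FilterFactory> factory =
      media::FFmpegDemuxer::CreateFilterFactory();
  media::MediaFormat format;
  format.SetAsString(media::MediaFormat::kMimeType,
                     media::mime_type::kApplicationOctetStream);
  // Returns NULL if the factory rejects the media format.
  return factory->Create<media::Demuxer>(&format);
}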
diff --git a/media/filters/ffmpeg_demuxer_unittest.cc b/media/filters/ffmpeg_demuxer_unittest.cc
new file mode 100644
index 0000000..2c4ecbe
--- /dev/null
+++ b/media/filters/ffmpeg_demuxer_unittest.cc
@@ -0,0 +1,360 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <deque>
+
+#include "media/base/filter_host.h"
+#include "media/base/filters.h"
+#include "media/base/mock_filter_host.h"
+#include "media/base/mock_media_filters.h"
+#include "media/filters/ffmpeg_common.h"
+#include "media/filters/ffmpeg_demuxer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// FFmpeg mocks to remove dependency on having the DLLs present.
+extern "C" {
+static const size_t kMaxStreams = 3;
+static AVFormatContext g_format;
+static AVStream g_streams[kMaxStreams];
+static AVCodecContext g_audio_codec;
+static AVCodecContext g_video_codec;
+static AVCodecContext g_data_codec;
+struct AVPacket g_packet;
+
+// FFmpeg return codes for various functions.
+static int g_av_open_input_file = 0;
+static int g_av_find_stream_info = 0;
+static int g_av_read_frame = 0;
+
+// Counts the number of packets "allocated" by av_read_frame and "released" by
+// av_free_packet. This should always be zero after everything is cleaned up.
+static int g_outstanding_packets = 0;
+
+int av_open_input_file(AVFormatContext** format, const char* filename,
+ AVInputFormat* input_format, int buffer_size,
+ AVFormatParameters* parameters) {
+ EXPECT_FALSE(input_format) << "AVInputFormat should be NULL.";
+ EXPECT_FALSE(buffer_size) << "buffer_size should be 0.";
+ EXPECT_FALSE(parameters) << "AVFormatParameters should be NULL.";
+ if (g_av_open_input_file < 0) {
+ *format = NULL;
+ } else {
+ *format = &g_format;
+ }
+ return g_av_open_input_file;
+}
+
+int av_find_stream_info(AVFormatContext* format) {
+ EXPECT_EQ(&g_format, format);
+ return g_av_find_stream_info;
+}
+
+void av_free(void* ptr) {
+ EXPECT_EQ(&g_format, ptr);
+}
+
+// Our packet destroying function.
+void DestructPacket(AVPacket* packet) {
+ --g_outstanding_packets;
+}
+
+int av_read_frame(AVFormatContext* format, AVPacket* packet) {
+ EXPECT_EQ(&g_format, format);
+ memcpy(packet, &g_packet, sizeof(g_packet));
+ packet->destruct = &DestructPacket;
+ if (g_av_read_frame == 0) {
+ ++g_outstanding_packets;
+ }
+ return g_av_read_frame;
+}
+
+} // extern "C"
+
+using namespace media;
+
+namespace {
+
+void InitializeFFmpegMocks() {
+ // Initialize function return codes.
+ g_av_open_input_file = 0;
+ g_av_find_stream_info = 0;
+ g_av_read_frame = 0;
+
+ // Initialize AVFormatContext structure.
+ memset(&g_format, 0, sizeof(g_format));
+
+ // Initialize AVStream structures.
+ for (size_t i = 0; i < kMaxStreams; ++i) {
+ memset(&g_streams[i], 0, sizeof(g_streams[i]));
+ g_streams[i].time_base.den = 1 * base::Time::kMicrosecondsPerSecond;
+ g_streams[i].time_base.num = 1;
+ }
+
+ // Initialize AVCodecContext structures.
+ memset(&g_audio_codec, 0, sizeof(g_audio_codec));
+ g_audio_codec.codec_type = CODEC_TYPE_AUDIO;
+ g_audio_codec.codec_id = CODEC_ID_VORBIS;
+ g_audio_codec.channels = 2;
+ g_audio_codec.sample_rate = 44100;
+
+ memset(&g_video_codec, 0, sizeof(g_video_codec));
+ g_video_codec.codec_type = CODEC_TYPE_VIDEO;
+ g_video_codec.codec_id = CODEC_ID_THEORA;
+ g_video_codec.height = 720;
+ g_video_codec.width = 1280;
+
+ memset(&g_data_codec, 0, sizeof(g_data_codec));
+ g_data_codec.codec_type = CODEC_TYPE_DATA;
+ g_data_codec.codec_id = CODEC_ID_NONE;
+
+ // Initialize AVPacket structure.
+ memset(&g_packet, 0, sizeof(g_packet));
+}
+
+// Simple implementation of Assignable<Buffer> that lets us poke at values.
+class TestBuffer : public Assignable<Buffer> {
+ public:
+ TestBuffer() : assigned_(false) {}
+ virtual ~TestBuffer() {}
+
+ // Assignable<Buffer> implementation.
+ virtual void SetBuffer(Buffer* buffer) {
+ buffer_ = buffer;
+ }
+
+ void OnAssignment() {
+ EXPECT_FALSE(assigned_);
+ assigned_ = true;
+ }
+
+ // Mock getters/setters.
+ Buffer* buffer() { return buffer_; }
+ bool assigned() { return assigned_; }
+
+ private:
+ scoped_refptr<Buffer> buffer_;
+ bool assigned_;
+};
+
+} // namespace
+
+TEST(FFmpegDemuxerTest, InitializeFailure) {
+ InitializeFFmpegMocks();
+
+ // Get FFmpegDemuxer's filter factory.
+ scoped_refptr<FilterFactory> factory = FFmpegDemuxer::CreateFilterFactory();
+
+ // Should only accept application/octet-stream type.
+ MediaFormat media_format;
+ media_format.SetAsString(MediaFormat::kMimeType, "foo/x-bar");
+ scoped_refptr<Demuxer> demuxer(factory->Create<Demuxer>(&media_format));
+ ASSERT_FALSE(demuxer);
+ media_format.Clear();
+ media_format.SetAsString(MediaFormat::kMimeType,
+ mime_type::kApplicationOctetStream);
+ demuxer = factory->Create<Demuxer>(&media_format);
+ ASSERT_TRUE(demuxer);
+
+ // Prepare a filter host and data source for the demuxer.
+ MockPipeline pipeline;
+ scoped_ptr< MockFilterHost<Demuxer> > filter_host;
+ filter_host.reset(new MockFilterHost<Demuxer>(&pipeline, demuxer));
+ MockFilterConfig config;
+ scoped_refptr<MockDataSource> data_source(new MockDataSource(&config));
+
+ // Simulate av_open_input_file failing.
+ g_av_open_input_file = AVERROR_IO;
+ g_av_find_stream_info = 0;
+ EXPECT_FALSE(demuxer->Initialize(data_source));
+ EXPECT_FALSE(filter_host->IsInitialized());
+ EXPECT_EQ(DEMUXER_ERROR_COULD_NOT_OPEN, pipeline.GetError());
+
+ // Simulate av_find_stream_info failing.
+ g_av_open_input_file = 0;
+ g_av_find_stream_info = AVERROR_IO;
+ demuxer = factory->Create<Demuxer>(&media_format);
+ filter_host.reset(new MockFilterHost<Demuxer>(&pipeline, demuxer));
+ EXPECT_FALSE(demuxer->Initialize(data_source));
+ EXPECT_FALSE(filter_host->IsInitialized());
+ EXPECT_EQ(DEMUXER_ERROR_COULD_NOT_PARSE, pipeline.GetError());
+
+ // Simulate media with no parseable streams.
+ InitializeFFmpegMocks();
+ demuxer = factory->Create<Demuxer>(&media_format);
+ filter_host.reset(new MockFilterHost<Demuxer>(&pipeline, demuxer));
+ EXPECT_FALSE(demuxer->Initialize(data_source));
+ EXPECT_FALSE(filter_host->IsInitialized());
+ EXPECT_EQ(DEMUXER_ERROR_NO_SUPPORTED_STREAMS, pipeline.GetError());
+
+ // Simulate media with a data stream but no audio or video streams.
+ g_format.nb_streams = 1;
+ g_format.streams[0] = &g_streams[0];
+ g_streams[0].codec = &g_data_codec;
+ g_streams[0].duration = 10;
+ demuxer = factory->Create<Demuxer>(&media_format);
+ filter_host.reset(new MockFilterHost<Demuxer>(&pipeline, demuxer));
+ EXPECT_FALSE(demuxer->Initialize(data_source));
+ EXPECT_FALSE(filter_host->IsInitialized());
+ EXPECT_EQ(DEMUXER_ERROR_NO_SUPPORTED_STREAMS, pipeline.GetError());
+}
+
+TEST(FFmpegDemuxerTest, InitializeStreams) {
+ // Simulate media with a data stream, a video stream and audio stream.
+ InitializeFFmpegMocks();
+ g_format.nb_streams = 3;
+ g_format.streams[0] = &g_streams[0];
+ g_format.streams[1] = &g_streams[1];
+ g_format.streams[2] = &g_streams[2];
+ g_streams[0].duration = 1000;
+ g_streams[0].codec = &g_data_codec;
+ g_streams[1].duration = 100;
+ g_streams[1].codec = &g_video_codec;
+ g_streams[2].duration = 10;
+ g_streams[2].codec = &g_audio_codec;
+
+ // Create our pipeline.
+ MockPipeline pipeline;
+
+ // Create our data source.
+ MockFilterConfig config;
+ scoped_refptr<MockDataSource> data_source = new MockDataSource(&config);
+ MockFilterHost<DataSource> filter_host_a(&pipeline, data_source);
+ EXPECT_TRUE(data_source->Initialize("foo"));
+ EXPECT_TRUE(filter_host_a.IsInitialized());
+
+ // Create our demuxer.
+ scoped_refptr<FilterFactory> factory = FFmpegDemuxer::CreateFilterFactory();
+ scoped_refptr<Demuxer> demuxer
+ = factory->Create<Demuxer>(data_source->GetMediaFormat());
+ EXPECT_TRUE(demuxer);
+ MockFilterHost<Demuxer> filter_host_b(&pipeline, demuxer);
+ EXPECT_TRUE(demuxer->Initialize(data_source));
+ EXPECT_TRUE(filter_host_b.IsInitialized());
+ EXPECT_EQ(PIPELINE_OK, pipeline.GetError());
+
+ // Since we ignore data streams, the duration should be equal to the video
+ // stream's duration.
+ EXPECT_EQ(g_streams[1].duration, pipeline.GetDuration().InMicroseconds());
+
+ // Verify that 2 out of 3 streams were created.
+ EXPECT_EQ(2, demuxer->GetNumberOfStreams());
+
+ // First stream should be video.
+ DemuxerStream* stream = demuxer->GetStream(0);
+ ASSERT_TRUE(stream);
+ const MediaFormat* stream_format = stream->GetMediaFormat();
+ std::string mime_type;
+ int result;
+ EXPECT_TRUE(stream_format->GetAsString(MediaFormat::kMimeType, &mime_type));
+ EXPECT_STREQ(mime_type::kFFmpegVideo, mime_type.c_str());
+ EXPECT_TRUE(stream_format->GetAsInteger(kFFmpegCodecID, &result));
+ EXPECT_EQ(CODEC_ID_THEORA, static_cast<CodecID>(result));
+ EXPECT_TRUE(stream_format->GetAsInteger(MediaFormat::kHeight, &result));
+ EXPECT_EQ(g_video_codec.height, result);
+ EXPECT_TRUE(stream_format->GetAsInteger(MediaFormat::kWidth, &result));
+ EXPECT_EQ(g_video_codec.width, result);
+
+ // Second stream should be audio.
+ stream = demuxer->GetStream(1);
+ ASSERT_TRUE(stream);
+ stream_format = stream->GetMediaFormat();
+ EXPECT_TRUE(stream_format->GetAsString(MediaFormat::kMimeType, &mime_type));
+ EXPECT_STREQ(mime_type::kFFmpegAudio, mime_type.c_str());
+ EXPECT_TRUE(stream_format->GetAsInteger(kFFmpegCodecID, &result));
+ EXPECT_EQ(CODEC_ID_VORBIS, static_cast<CodecID>(result));
+ EXPECT_TRUE(stream_format->GetAsInteger(MediaFormat::kChannels, &result));
+ EXPECT_EQ(g_audio_codec.channels, result);
+ EXPECT_TRUE(stream_format->GetAsInteger(MediaFormat::kSampleRate, &result));
+ EXPECT_EQ(g_audio_codec.sample_rate, result);
+}
+
+TEST(FFmpegDemuxerTest, Read) {
+ // Prepare some test data.
+ const int kAudio = 0;
+ const int kVideo = 1;
+ const size_t kDataSize = 4;
+ uint8 audio_data[kDataSize] = {0, 1, 2, 3};
+ uint8 video_data[kDataSize] = {4, 5, 6, 7};
+
+ // Simulate media with an audio stream and a video stream.
+ InitializeFFmpegMocks();
+ g_format.nb_streams = 2;
+ g_format.streams[kAudio] = &g_streams[kAudio];
+ g_format.streams[kVideo] = &g_streams[kVideo];
+ g_streams[kAudio].duration = 10;
+ g_streams[kAudio].codec = &g_audio_codec;
+ g_streams[kVideo].duration = 10;
+ g_streams[kVideo].codec = &g_video_codec;
+
+ // Create our pipeline.
+ MockPipeline pipeline;
+
+ // Create our data source.
+ MockFilterConfig config;
+ scoped_refptr<MockDataSource> data_source = new MockDataSource(&config);
+ MockFilterHost<DataSource> filter_host_a(&pipeline, data_source);
+ EXPECT_TRUE(data_source->Initialize("foo"));
+ EXPECT_TRUE(filter_host_a.IsInitialized());
+
+ // Create our demuxer.
+ scoped_refptr<FilterFactory> factory = FFmpegDemuxer::CreateFilterFactory();
+ scoped_refptr<Demuxer> demuxer
+ = factory->Create<Demuxer>(data_source->GetMediaFormat());
+ EXPECT_TRUE(demuxer);
+ MockFilterHost<Demuxer> filter_host_b(&pipeline, demuxer);
+ EXPECT_TRUE(demuxer->Initialize(data_source));
+ EXPECT_TRUE(filter_host_b.IsInitialized());
+ EXPECT_EQ(PIPELINE_OK, pipeline.GetError());
+
+ // Verify both streams were created.
+ EXPECT_EQ(2, demuxer->GetNumberOfStreams());
+
+ // Get our streams.
+ DemuxerStream* audio_stream = demuxer->GetStream(kAudio);
+ DemuxerStream* video_stream = demuxer->GetStream(kVideo);
+ ASSERT_TRUE(audio_stream);
+ ASSERT_TRUE(video_stream);
+
+ // Prepare our test audio packet.
+ g_packet.stream_index = kAudio;
+ g_packet.data = audio_data;
+ g_packet.size = kDataSize;
+
+ // Attempt a read from the audio stream and run the message loop until done.
+ scoped_refptr<TestBuffer> buffer(new TestBuffer());
+ audio_stream->Read(buffer);
+ pipeline.RunAllTasks();
+ EXPECT_TRUE(buffer->assigned());
+ EXPECT_TRUE(buffer->buffer());
+ EXPECT_EQ(audio_data, (uint8*)buffer->buffer()->GetData());
+ EXPECT_EQ(kDataSize, buffer->buffer()->GetDataSize());
+
+ // Prepare our test video packet.
+ g_packet.stream_index = kVideo;
+ g_packet.data = video_data;
+ g_packet.size = kDataSize;
+
+ // Attempt a read from the video stream and run the message loop until done.
+ buffer = new TestBuffer();
+ video_stream->Read(buffer);
+ pipeline.RunAllTasks();
+ EXPECT_TRUE(buffer->assigned());
+ EXPECT_TRUE(buffer->buffer());
+ EXPECT_EQ(video_data, (uint8*)buffer->buffer()->GetData());
+ EXPECT_EQ(kDataSize, buffer->buffer()->GetDataSize());
+
+ // Simulate end of stream.
+ g_av_read_frame = AVERROR_IO;
+
+ // Attempt a read from the audio stream and run the message loop until done.
+ buffer = new TestBuffer();
+ audio_stream->Read(buffer);
+ pipeline.RunAllTasks();
+ EXPECT_FALSE(buffer->assigned());
+ EXPECT_FALSE(buffer->buffer());
+
+ // Manually release buffer, which should release any remaining AVPackets.
+ buffer = NULL;
+ EXPECT_EQ(0, g_outstanding_packets);
+}
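A note on the testing approach above: because the FFmpeg entry points are plain C symbols, the unit test satisfies the linker with its own extern "C" definitions (av_open_input_file, av_find_stream_info, av_read_frame, av_free) and steers their behavior through globals such as g_av_read_frame, so the real FFmpeg DLLs never need to be present. The same link-time substitution pattern in a self-contained sketch (all names here are invented for illustration):

// Sketch of link-time mocking: the test binary defines the C symbol itself,
// so the linker never pulls in the real library.
#include <assert.h>

extern "C" int library_read(int handle);  // Normally declared by the library.

static int g_library_read_result = 0;     // Test knob, like g_av_read_frame.

extern "C" int library_read(int /* handle */) {
  return g_library_read_result;
}

// Code under test: treats a negative return as end of stream.
bool ReadOne(int handle) {
  return library_read(handle) >= 0;
}

int main() {
  g_library_read_result = 0;   // Simulate a successful read.
  assert(ReadOne(1));
  g_library_read_result = -1;  // Simulate end of stream / failure.
  assert(!ReadOne(1));
  return 0;
}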