path: root/media
author     scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-11-04 02:04:09 +0000
committer  scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-11-04 02:04:09 +0000
commit     f23676abfed3df60d1be4455d422acdb70facf1c (patch)
tree       7f4a926b34fe708f1dbf00407eca0f253d1c6ef0 /media
parent     760afa9837be0fb92b2994360743ccac35fe7384 (diff)
Simplify VideoDecodeEngine interface by making everything synchronous.
Although I plan to remove VideoDecodeEngine entirely it requires detangling some of the code first.

Other noteworthy changes:
  - It's no longer valid to call VideoFrameReady(NULL), instead FFmpegVideoDecoder will raise an error the moment it finds one
  - Buffer recycling has been vanquished (for now), with video frames always allocated in the decoder
  - Produce/ConsumeVideoFrame() has been replaced by Read()
  - Video decode byte statistics are only updated if more than 0 bytes were decoded
  - FFmpegVideoDecodeEngine no longer attempts to preroll

Review URL: http://codereview.chromium.org/8417019

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@108612 0039d316-1c4b-4281-b951-d872f2087c98
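To make the new contract concrete, here is a minimal, self-contained sketch of the Read()-based flow introduced by this change. It is not code from the commit: the VideoDecoder, VideoFrame, and FakeDecoder types below are simplified stand-ins (std::function and std::shared_ptr in place of base::Callback and scoped_refptr), but the rules mirror the new interface in media/base/filters.h -- only one read may be in flight, the callback is never run from inside Read() itself, and the delivered frame is always non-NULL (possibly an end-of-stream frame).

    // Illustrative sketch only (not from this commit): simplified stand-ins for
    // media::VideoDecoder / media::VideoFrame using std::function instead of
    // base::Callback, to show the single-outstanding-read contract.
    #include <cassert>
    #include <functional>
    #include <iostream>
    #include <memory>
    #include <utility>

    struct VideoFrame {
      bool end_of_stream = false;
    };

    class VideoDecoder {
     public:
      // Only one read may be in flight; the callback always receives a
      // non-NULL frame, which may be an end-of-stream frame.
      using ReadCB = std::function<void(std::shared_ptr<VideoFrame>)>;
      virtual ~VideoDecoder() = default;
      virtual void Read(const ReadCB& callback) = 0;
    };

    // Toy decoder: hands out three frames, then end-of-stream frames.
    class FakeDecoder : public VideoDecoder {
     public:
      void Read(const ReadCB& callback) override {
        assert(!read_cb_);    // Overlapping reads are not supported.
        read_cb_ = callback;  // Real decoders post a task; the callback is
                              // never run from inside Read() itself.
      }

      // Stand-in for the decoder's message loop delivering the frame.
      void SatisfyRead() {
        assert(read_cb_);
        auto frame = std::make_shared<VideoFrame>();
        frame->end_of_stream = (frames_left_ == 0);
        if (frames_left_ > 0)
          --frames_left_;
        ReadCB cb;
        std::swap(cb, read_cb_);  // Reset before running, like DeliverFrame().
        cb(frame);                // Frame is never NULL.
      }

     private:
      int frames_left_ = 3;
      ReadCB read_cb_;
    };

    int main() {
      FakeDecoder decoder;
      bool done = false;
      // The renderer loop: keep issuing reads until an end-of-stream frame
      // arrives, mirroring AttemptRead_Locked()/FrameReady() in the diff below.
      while (!done) {
        decoder.Read([&done](std::shared_ptr<VideoFrame> frame) {
          done = frame->end_of_stream;
          std::cout << (done ? "end of stream\n" : "got frame\n");
        });
        decoder.SatisfyRead();  // Simulates the posted task running.
      }
      return 0;
    }

The deferred SatisfyRead() step stands in for the task post that the real FFmpegVideoDecoder and VideoRendererBase rely on to avoid re-entrant Read() calls.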
Diffstat (limited to 'media')
-rw-r--r--  media/base/filters.h                                 35
-rw-r--r--  media/base/mock_filters.h                             8
-rw-r--r--  media/base/video_decoder_config.cc                    6
-rw-r--r--  media/filters/ffmpeg_video_decoder.cc               280
-rw-r--r--  media/filters/ffmpeg_video_decoder.h                 57
-rw-r--r--  media/filters/ffmpeg_video_decoder_unittest.cc      145
-rw-r--r--  media/filters/video_renderer_base.cc                300
-rw-r--r--  media/filters/video_renderer_base.h                  57
-rw-r--r--  media/filters/video_renderer_base_unittest.cc       109
-rw-r--r--  media/media.gyp                                        2
-rw-r--r--  media/video/ffmpeg_video_decode_engine.cc           211
-rw-r--r--  media/video/ffmpeg_video_decode_engine.h             49
-rw-r--r--  media/video/ffmpeg_video_decode_engine_unittest.cc  286
-rw-r--r--  media/video/video_decode_engine.h                    98
14 files changed, 443 insertions, 1200 deletions
diff --git a/media/base/filters.h b/media/base/filters.h
index 1624a60..e464202 100644
--- a/media/base/filters.h
+++ b/media/base/filters.h
@@ -164,22 +164,20 @@ class MEDIA_EXPORT VideoDecoder : public Filter {
// Initialize a VideoDecoder with the given DemuxerStream, executing the
// callback upon completion.
// stats_callback is used to update global pipeline statistics.
+ //
+ // TODO(scherkus): switch to PipelineStatus callback.
virtual void Initialize(DemuxerStream* stream, const base::Closure& callback,
const StatisticsCallback& stats_callback) = 0;
- // Renderer provides an output buffer for Decoder to write to. These buffers
- // will be recycled to renderer via the permanent callback.
+ // Request a frame to be decoded and returned via the provided callback.
+ // Only one read may be in flight at any given time.
//
- // We could also pass empty pointer here to let decoder provide buffers pool.
- virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame) = 0;
-
- // Installs a permanent callback for passing decoded video output.
+ // Implementations guarantee that the callback will not be called from within
+ // this method.
//
- // A NULL frame represents a decoding error.
- typedef base::Callback<void(scoped_refptr<VideoFrame>)> ConsumeVideoFrameCB;
- void set_consume_video_frame_callback(const ConsumeVideoFrameCB& callback) {
- consume_video_frame_callback_ = callback;
- }
+ // Frames will be non-NULL yet may be end of stream frames.
+ typedef base::Callback<void(scoped_refptr<VideoFrame>)> ReadCB;
+ virtual void Read(const ReadCB& callback) = 0;
// Returns the natural width and height of decoded video in pixels.
//
@@ -188,22 +186,11 @@ class MEDIA_EXPORT VideoDecoder : public Filter {
//
// TODO(scherkus): why not rely on prerolling and decoding a single frame to
// get dimensions?
- virtual gfx::Size natural_size() = 0;
+ virtual const gfx::Size& natural_size() = 0;
protected:
- // Executes the permanent callback to pass off decoded video.
- //
- // TODO(scherkus): name this ConsumeVideoFrame() once we fix the TODO in
- // VideoDecodeEngine::EventHandler to remove ConsumeVideoFrame() from there.
- void VideoFrameReady(scoped_refptr<VideoFrame> frame) {
- consume_video_frame_callback_.Run(frame);
- }
-
VideoDecoder();
virtual ~VideoDecoder();
-
- private:
- ConsumeVideoFrameCB consume_video_frame_callback_;
};
@@ -212,6 +199,8 @@ class MEDIA_EXPORT AudioDecoder : public Filter {
// Initialize a AudioDecoder with the given DemuxerStream, executing the
// callback upon completion.
// stats_callback is used to update global pipeline statistics.
+ //
+ // TODO(scherkus): switch to PipelineStatus callback.
virtual void Initialize(DemuxerStream* stream, const base::Closure& callback,
const StatisticsCallback& stats_callback) = 0;
diff --git a/media/base/mock_filters.h b/media/base/mock_filters.h
index 8199b88..841629d 100644
--- a/media/base/mock_filters.h
+++ b/media/base/mock_filters.h
@@ -187,12 +187,8 @@ class MockVideoDecoder : public VideoDecoder {
MOCK_METHOD3(Initialize, void(DemuxerStream* stream,
const base::Closure& callback,
const StatisticsCallback& stats_callback));
- MOCK_METHOD1(ProduceVideoFrame, void(scoped_refptr<VideoFrame>));
- MOCK_METHOD0(natural_size, gfx::Size());
-
- void VideoFrameReadyForTest(scoped_refptr<VideoFrame> frame) {
- VideoDecoder::VideoFrameReady(frame);
- }
+ MOCK_METHOD1(Read, void(const ReadCB& callback));
+ MOCK_METHOD0(natural_size, const gfx::Size&());
protected:
virtual ~MockVideoDecoder();
diff --git a/media/base/video_decoder_config.cc b/media/base/video_decoder_config.cc
index d055f49..6a7add8 100644
--- a/media/base/video_decoder_config.cc
+++ b/media/base/video_decoder_config.cc
@@ -7,6 +7,7 @@
#include <cmath>
#include "base/logging.h"
+#include "media/base/limits.h"
namespace media {
@@ -90,7 +91,10 @@ bool VideoDecoderConfig::IsValidConfig() const {
frame_rate_numerator_ > 0 &&
frame_rate_denominator_ > 0 &&
aspect_ratio_numerator_ > 0 &&
- aspect_ratio_denominator_ > 0;
+ aspect_ratio_denominator_ > 0 &&
+ natural_size_.width() <= Limits::kMaxDimension &&
+ natural_size_.height() <= Limits::kMaxDimension &&
+ natural_size_.GetArea() <= Limits::kMaxCanvas;
}
VideoCodec VideoDecoderConfig::codec() const {
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 9be1247..0254f63 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -5,12 +5,11 @@
#include "media/filters/ffmpeg_video_decoder.h"
#include "base/bind.h"
-#include "base/callback.h"
#include "base/message_loop.h"
-#include "base/task.h"
#include "media/base/demuxer_stream.h"
#include "media/base/filter_host.h"
#include "media/base/limits.h"
+#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/video/ffmpeg_video_decode_engine.h"
@@ -19,7 +18,7 @@ namespace media {
FFmpegVideoDecoder::FFmpegVideoDecoder(MessageLoop* message_loop)
: message_loop_(message_loop),
- state_(kUnInitialized),
+ state_(kUninitialized),
decode_engine_(new FFmpegVideoDecodeEngine()) {
}
@@ -29,17 +28,13 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
const base::Closure& callback,
const StatisticsCallback& stats_callback) {
if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- base::Bind(&FFmpegVideoDecoder::Initialize, this,
- make_scoped_refptr(demuxer_stream),
- callback, stats_callback));
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &FFmpegVideoDecoder::Initialize, this,
+ make_scoped_refptr(demuxer_stream), callback, stats_callback));
return;
}
- DCHECK_EQ(MessageLoop::current(), message_loop_);
DCHECK(!demuxer_stream_);
- DCHECK(initialize_callback_.is_null());
if (!demuxer_stream) {
host()->SetError(PIPELINE_ERROR_DECODE);
@@ -48,35 +43,45 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
}
demuxer_stream_ = demuxer_stream;
- initialize_callback_ = callback;
statistics_callback_ = stats_callback;
const VideoDecoderConfig& config = demuxer_stream->video_decoder_config();
- pts_stream_.Initialize(GetFrameDuration(config));
+ // TODO(scherkus): this check should go in PipelineImpl prior to creating
+ // decoder objects.
+ if (!config.IsValidConfig()) {
+ DLOG(ERROR) << "Invalid video stream -"
+ << " codec: " << config.codec()
+ << " format: " << config.format()
+ << " coded size: [" << config.coded_size().width()
+ << "," << config.coded_size().height() << "]"
+ << " visible rect: [" << config.visible_rect().x()
+ << "," << config.visible_rect().y()
+ << "," << config.visible_rect().width()
+ << "," << config.visible_rect().height() << "]"
+ << " natural size: [" << config.natural_size().width()
+ << "," << config.natural_size().height() << "]"
+ << " frame rate: " << config.frame_rate_numerator()
+ << "/" << config.frame_rate_denominator()
+ << " aspect ratio: " << config.aspect_ratio_numerator()
+ << "/" << config.aspect_ratio_denominator();
- natural_size_ = config.natural_size();
- if (natural_size_.width() > Limits::kMaxDimension ||
- natural_size_.height() > Limits::kMaxDimension ||
- natural_size_.GetArea() > Limits::kMaxCanvas) {
- OnInitializeComplete(false);
+ host()->SetError(PIPELINE_ERROR_DECODE);
+ callback.Run();
return;
}
- state_ = kInitializing;
- decode_engine_->Initialize(this, config);
-}
-
-void FFmpegVideoDecoder::OnInitializeComplete(bool success) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- DCHECK(!initialize_callback_.is_null());
+ pts_stream_.Initialize(GetFrameDuration(config));
+ natural_size_ = config.natural_size();
- if (success) {
- state_ = kNormal;
- } else {
+ if (!decode_engine_->Initialize(config)) {
host()->SetError(PIPELINE_ERROR_DECODE);
+ callback.Run();
+ return;
}
- ResetAndRunCB(&initialize_callback_);
+
+ state_ = kNormal;
+ callback.Run();
}
void FFmpegVideoDecoder::Stop(const base::Closure& callback) {
@@ -86,23 +91,20 @@ void FFmpegVideoDecoder::Stop(const base::Closure& callback) {
return;
}
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- DCHECK(uninitialize_callback_.is_null());
-
- uninitialize_callback_ = callback;
- if (state_ != kUnInitialized)
- decode_engine_->Uninitialize();
- else
- OnUninitializeComplete();
+ decode_engine_->Uninitialize();
+ state_ = kUninitialized;
+ callback.Run();
}
-void FFmpegVideoDecoder::OnUninitializeComplete() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- DCHECK(!uninitialize_callback_.is_null());
-
- state_ = kStopped;
+void FFmpegVideoDecoder::Seek(base::TimeDelta time, const FilterStatusCB& cb) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &FFmpegVideoDecoder::Seek, this, time, cb));
+ return;
+ }
- ResetAndRunCB(&uninitialize_callback_);
+ pts_stream_.Seek(time);
+ cb.Run(PIPELINE_OK);
}
void FFmpegVideoDecoder::Pause(const base::Closure& callback) {
@@ -112,7 +114,6 @@ void FFmpegVideoDecoder::Pause(const base::Closure& callback) {
return;
}
- state_ = kPausing;
callback.Run();
}
@@ -123,67 +124,64 @@ void FFmpegVideoDecoder::Flush(const base::Closure& callback) {
return;
}
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- DCHECK(flush_callback_.is_null());
-
- state_ = kFlushing;
-
- FlushBuffers();
-
- flush_callback_ = callback;
-
decode_engine_->Flush();
-}
-
-void FFmpegVideoDecoder::OnFlushComplete() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- DCHECK(!flush_callback_.is_null());
-
- // Everything in the presentation time queue is invalid, clear the queue.
pts_stream_.Flush();
-
- // Mark flush operation had been done.
state_ = kNormal;
+ callback.Run();
+}
- ResetAndRunCB(&flush_callback_);
+void FFmpegVideoDecoder::Read(const ReadCB& callback) {
+ // TODO(scherkus): forced task post since VideoRendererBase::FrameReady() will
+ // call Read() on FFmpegVideoDecoder's thread as we executed |read_cb_|.
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &FFmpegVideoDecoder::DoRead, this, callback));
}
-void FFmpegVideoDecoder::Seek(base::TimeDelta time, const FilterStatusCB& cb) {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(FROM_HERE,
- base::Bind(&FFmpegVideoDecoder::Seek, this,
- time, cb));
- return;
- }
+const gfx::Size& FFmpegVideoDecoder::natural_size() {
+ return natural_size_;
+}
+void FFmpegVideoDecoder::DoRead(const ReadCB& callback) {
DCHECK_EQ(MessageLoop::current(), message_loop_);
- DCHECK(seek_cb_.is_null());
+ CHECK(!callback.is_null());
+ CHECK(read_cb_.is_null()) << "Overlapping decodes are not supported.";
- pts_stream_.Seek(time);
- seek_cb_ = cb;
- decode_engine_->Seek();
-}
+ // This can happen during shutdown after Stop() has been called.
+ if (state_ == kUninitialized) {
+ return;
+ }
-void FFmpegVideoDecoder::OnSeekComplete() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- DCHECK(!seek_cb_.is_null());
+ // Return empty frames if decoding has finished.
+ if (state_ == kDecodeFinished) {
+ callback.Run(VideoFrame::CreateEmptyFrame());
+ return;
+ }
- ResetAndRunCB(&seek_cb_, PIPELINE_OK);
+ read_cb_ = callback;
+ ReadFromDemuxerStream();
}
-void FFmpegVideoDecoder::OnError() {
- VideoFrameReady(NULL);
+
+void FFmpegVideoDecoder::ReadFromDemuxerStream() {
+ DCHECK_NE(state_, kUninitialized);
+ DCHECK_NE(state_, kDecodeFinished);
+ DCHECK(!read_cb_.is_null());
+
+ demuxer_stream_->Read(base::Bind(&FFmpegVideoDecoder::DecodeBuffer, this));
}
-void FFmpegVideoDecoder::OnReadComplete(const scoped_refptr<Buffer>& buffer) {
+void FFmpegVideoDecoder::DecodeBuffer(const scoped_refptr<Buffer>& buffer) {
+ // TODO(scherkus): forced task post since FFmpegDemuxerStream::Read() can
+ // immediately execute our callback on FFmpegVideoDecoder's thread.
message_loop_->PostTask(FROM_HERE, base::Bind(
- &FFmpegVideoDecoder::OnReadCompleteTask, this, buffer));
+ &FFmpegVideoDecoder::DoDecodeBuffer, this, buffer));
}
-void FFmpegVideoDecoder::OnReadCompleteTask(
- const scoped_refptr<Buffer>& buffer) {
+void FFmpegVideoDecoder::DoDecodeBuffer(const scoped_refptr<Buffer>& buffer) {
DCHECK_EQ(MessageLoop::current(), message_loop_);
- DCHECK_NE(state_, kStopped); // because of Flush() before Stop().
+ DCHECK_NE(state_, kUninitialized);
+ DCHECK_NE(state_, kDecodeFinished);
+ DCHECK(!read_cb_.is_null());
// During decode, because reads are issued asynchronously, it is possible to
// receive multiple end of stream buffers since each read is acked. When the
@@ -204,11 +202,11 @@ void FFmpegVideoDecoder::OnReadCompleteTask(
// kNormal -> kFlushCodec:
// When buffer->IsEndOfStream() is first true.
// kNormal -> kDecodeFinished:
- // A catastrophic failure occurs, and decoding needs to stop.
+ // A decoding error occurs and decoding needs to stop.
// kFlushCodec -> kDecodeFinished:
// When avcodec_decode_video2() returns 0 data or errors out.
// (any state) -> kNormal:
- // Any time buffer->IsDiscontinuous() is true.
+ // Any time Flush() is called.
// Transition to kFlushCodec on the first end of stream buffer.
if (state_ == kNormal && buffer->IsEndOfStream()) {
@@ -222,98 +220,48 @@ void FFmpegVideoDecoder::OnReadCompleteTask(
pts_stream_.EnqueuePts(buffer.get());
}
- // Otherwise, attempt to decode a single frame.
- decode_engine_->ConsumeVideoSample(buffer);
-}
-
-void FFmpegVideoDecoder::ProduceVideoFrame(
- scoped_refptr<VideoFrame> video_frame) {
- if (MessageLoop::current() != message_loop_) {
- if (state_ != kStopped) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &FFmpegVideoDecoder::ProduceVideoFrame, this, video_frame));
- }
+ scoped_refptr<VideoFrame> video_frame;
+ if (!decode_engine_->Decode(buffer, &video_frame)) {
+ state_ = kDecodeFinished;
+ DeliverFrame(VideoFrame::CreateEmptyFrame());
+ host()->SetError(PIPELINE_ERROR_DECODE);
return;
}
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- // Synchronized flushing before stop should prevent this.
- DCHECK_NE(state_, kStopped);
-
- // If the decoding is finished, we just always return empty frames.
- if (state_ == kDecodeFinished) {
- // Signal VideoRenderer the end of the stream event.
- VideoFrameReady(VideoFrame::CreateEmptyFrame());
-
- // Fall through, because we still need to keep record of this frame.
+ // Any successful decode counts!
+ if (buffer->GetDataSize()) {
+ PipelineStatistics statistics;
+ statistics.video_bytes_decoded = buffer->GetDataSize();
+ statistics_callback_.Run(statistics);
}
- // Notify decode engine the available of new frame.
- decode_engine_->ProduceVideoFrame(video_frame);
-}
-
-void FFmpegVideoDecoder::ConsumeVideoFrame(
- scoped_refptr<VideoFrame> video_frame,
- const PipelineStatistics& statistics) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- DCHECK_NE(state_, kStopped);
-
- statistics_callback_.Run(statistics);
-
- if (video_frame.get()) {
- if (kPausing == state_ || kFlushing == state_) {
- frame_queue_flushed_.push_back(video_frame);
- if (kFlushing == state_)
- FlushBuffers();
- return;
- }
-
- // If we actually got data back, enqueue a frame.
- pts_stream_.UpdatePtsAndDuration(video_frame.get());
-
- video_frame->SetTimestamp(pts_stream_.current_pts());
- video_frame->SetDuration(pts_stream_.current_duration());
-
- VideoFrameReady(video_frame);
- } else {
- // When in kFlushCodec, any errored decode, or a 0-lengthed frame,
- // is taken as a signal to stop decoding.
+ // If we didn't get a frame then we've either completely finished decoding or
+ // we need more data.
+ if (!video_frame) {
if (state_ == kFlushCodec) {
state_ = kDecodeFinished;
-
- // Signal VideoRenderer the end of the stream event.
- VideoFrameReady(VideoFrame::CreateEmptyFrame());
+ DeliverFrame(VideoFrame::CreateEmptyFrame());
+ return;
}
- }
-}
-
-void FFmpegVideoDecoder::ProduceVideoSample(
- scoped_refptr<Buffer> buffer) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- DCHECK_NE(state_, kStopped);
-
- demuxer_stream_->Read(base::Bind(&FFmpegVideoDecoder::OnReadComplete, this));
-}
-gfx::Size FFmpegVideoDecoder::natural_size() {
- return natural_size_;
-}
+ ReadFromDemuxerStream();
+ return;
+ }
-void FFmpegVideoDecoder::FlushBuffers() {
- while (!frame_queue_flushed_.empty()) {
- scoped_refptr<VideoFrame> video_frame;
- video_frame = frame_queue_flushed_.front();
- frame_queue_flushed_.pop_front();
+ // If we got a frame make sure its timestamp is correct before sending it off.
+ pts_stream_.UpdatePtsAndDuration(video_frame.get());
+ video_frame->SetTimestamp(pts_stream_.current_pts());
+ video_frame->SetDuration(pts_stream_.current_duration());
- // Return frames back to the decode engine.
- decode_engine_->ProduceVideoFrame(video_frame);
- }
+ DeliverFrame(video_frame);
}
-void FFmpegVideoDecoder::SetVideoDecodeEngineForTest(
- VideoDecodeEngine* engine) {
- decode_engine_.reset(engine);
+void FFmpegVideoDecoder::DeliverFrame(
+ const scoped_refptr<VideoFrame>& video_frame) {
+ // Reset the callback before running to protect against reentrancy.
+ ReadCB read_cb = read_cb_;
+ read_cb_.Reset();
+ read_cb.Run(video_frame);
}
} // namespace media
diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h
index ef47977..e89bcbd 100644
--- a/media/filters/ffmpeg_video_decoder.h
+++ b/media/filters/ffmpeg_video_decoder.h
@@ -7,9 +7,9 @@
#include <deque>
+#include "base/memory/scoped_ptr.h"
#include "media/base/filters.h"
#include "media/base/pts_stream.h"
-#include "media/video/video_decode_engine.h"
#include "ui/gfx/size.h"
class MessageLoop;
@@ -18,9 +18,7 @@ namespace media {
class VideoDecodeEngine;
-class MEDIA_EXPORT FFmpegVideoDecoder
- : public VideoDecoder,
- public VideoDecodeEngine::EventHandler {
+class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
public:
explicit FFmpegVideoDecoder(MessageLoop* message_loop);
virtual ~FFmpegVideoDecoder();
@@ -35,62 +33,39 @@ class MEDIA_EXPORT FFmpegVideoDecoder
virtual void Initialize(DemuxerStream* demuxer_stream,
const base::Closure& callback,
const StatisticsCallback& stats_callback) OVERRIDE;
- virtual void ProduceVideoFrame(
- scoped_refptr<VideoFrame> video_frame) OVERRIDE;
- virtual gfx::Size natural_size() OVERRIDE;
+ virtual void Read(const ReadCB& callback) OVERRIDE;
+ virtual const gfx::Size& natural_size() OVERRIDE;
private:
- // VideoDecodeEngine::EventHandler interface.
- virtual void OnInitializeComplete(bool success) OVERRIDE;
- virtual void OnUninitializeComplete() OVERRIDE;
- virtual void OnFlushComplete() OVERRIDE;
- virtual void OnSeekComplete() OVERRIDE;
- virtual void OnError() OVERRIDE;
- virtual void ProduceVideoSample(scoped_refptr<Buffer> buffer) OVERRIDE;
- virtual void ConsumeVideoFrame(
- scoped_refptr<VideoFrame> frame,
- const PipelineStatistics& statistics) OVERRIDE;
-
enum DecoderState {
- kUnInitialized,
- kInitializing,
+ kUninitialized,
kNormal,
kFlushCodec,
kDecodeFinished,
- kPausing,
- kFlushing,
- kStopped
};
- void OnFlushComplete(const base::Closure& callback);
- void OnSeekComplete(const base::Closure& callback);
+ // Carries out the reading operation scheduled by Read().
+ void DoRead(const ReadCB& callback);
- // TODO(scherkus): There are two of these to keep read completions
- // asynchronous and media_unittests passing. Remove.
- void OnReadComplete(const scoped_refptr<Buffer>& buffer);
- void OnReadCompleteTask(const scoped_refptr<Buffer>& buffer);
+ // Reads from the demuxer stream with corresponding callback method.
+ void ReadFromDemuxerStream();
+ void DecodeBuffer(const scoped_refptr<Buffer>& buffer);
- // Flush the output buffers that we had held in Paused state.
- void FlushBuffers();
+ // Carries out the decoding operation scheduled by DecodeBuffer().
+ void DoDecodeBuffer(const scoped_refptr<Buffer>& buffer);
- // Injection point for unittest to provide a mock engine. Takes ownership of
- // the provided engine.
- virtual void SetVideoDecodeEngineForTest(VideoDecodeEngine* engine);
+ // Delivers the frame to |read_cb_| and resets the callback.
+ void DeliverFrame(const scoped_refptr<VideoFrame>& video_frame);
MessageLoop* message_loop_;
- PtsStream pts_stream_; // Stream of presentation timestamps.
+ PtsStream pts_stream_;
DecoderState state_;
scoped_ptr<VideoDecodeEngine> decode_engine_;
- base::Closure initialize_callback_;
- base::Closure uninitialize_callback_;
- base::Closure flush_callback_;
- FilterStatusCB seek_cb_;
StatisticsCallback statistics_callback_;
- // Hold video frames when flush happens.
- std::deque<scoped_refptr<VideoFrame> > frame_queue_flushed_;
+ ReadCB read_cb_;
// TODO(scherkus): I think this should be calculated by VideoRenderers based
// on information provided by VideoDecoders (i.e., aspect ratio).
diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc
index e0d2ca2..ed36a47 100644
--- a/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -46,12 +46,12 @@ class FFmpegVideoDecoderTest : public testing::Test {
public:
FFmpegVideoDecoderTest()
: decoder_(new FFmpegVideoDecoder(&message_loop_)),
- demuxer_(new StrictMock<MockDemuxerStream>()) {
+ demuxer_(new StrictMock<MockDemuxerStream>()),
+ read_cb_(base::Bind(&FFmpegVideoDecoderTest::FrameReady,
+ base::Unretained(this))) {
CHECK(FFmpegGlue::GetInstance());
decoder_->set_host(&host_);
- decoder_->set_consume_video_frame_callback(base::Bind(
- &FFmpegVideoDecoderTest::ConsumeVideoFrame, base::Unretained(this)));
// Initialize various test buffers.
frame_buffer_.reset(new uint8[kCodedSize.GetArea()]);
@@ -103,35 +103,6 @@ class FFmpegVideoDecoderTest : public testing::Test {
message_loop_.RunAllPending();
}
- // Sets up expectations for FFmpegVideoDecodeEngine to preroll after
- // receiving a Seek(). The adjustment on Read() is due to the decoder
- // delaying frame output.
- //
- // TODO(scherkus): this is madness -- there's no reason for a decoder to
- // assume it should preroll anything.
- void ExpectSeekPreroll() {
- EXPECT_CALL(*demuxer_, Read(_))
- .Times(Limits::kMaxVideoFrames + 1)
- .WillRepeatedly(ReturnBuffer(i_frame_buffer_));
- EXPECT_CALL(statistics_callback_, OnStatistics(_))
- .Times(Limits::kMaxVideoFrames);
- EXPECT_CALL(*this, ConsumeVideoFrame(_))
- .Times(Limits::kMaxVideoFrames);
- }
-
- // Sets up expectations for FFmpegVideoDecodeEngine to preroll after
- // receiving a Seek() but for the end of stream case.
- //
- // TODO(scherkus): this is madness -- there's no reason for a decoder to
- // assume it should preroll anything.
- void ExpectSeekPrerollEndOfStream() {
- EXPECT_CALL(*demuxer_, Read(_))
- .Times(Limits::kMaxVideoFrames)
- .WillRepeatedly(ReturnBuffer(end_of_stream_buffer_));
- EXPECT_CALL(statistics_callback_, OnStatistics(_))
- .Times(Limits::kMaxVideoFrames);
- }
-
// Sets up expectations and actions to put FFmpegVideoDecoder in an active
// decoding state.
void EnterDecodingState() {
@@ -145,10 +116,8 @@ class FFmpegVideoDecoderTest : public testing::Test {
// Sets up expectations and actions to put FFmpegVideoDecoder in an end
// of stream state.
void EnterEndOfStreamState() {
- EXPECT_CALL(statistics_callback_, OnStatistics(_));
-
scoped_refptr<VideoFrame> video_frame;
- CallProduceVideoFrame(&video_frame);
+ Read(&video_frame);
ASSERT_TRUE(video_frame);
EXPECT_TRUE(video_frame->IsEndOfStream());
}
@@ -165,13 +134,15 @@ class FFmpegVideoDecoderTest : public testing::Test {
EXPECT_CALL(statistics_callback_, OnStatistics(_));
- CallProduceVideoFrame(video_frame);
+ Read(video_frame);
}
// Decodes |i_frame_buffer_| and then decodes the data contained in
// the file named |test_file_name|. This function expects both buffers
// to decode to frames that are the same size.
- void DecodeIFrameThenTestFile(const std::string& test_file_name) {
+ void DecodeIFrameThenTestFile(const std::string& test_file_name,
+ size_t expected_width,
+ size_t expected_height) {
Initialize();
scoped_refptr<VideoFrame> video_frame_a;
@@ -188,27 +159,25 @@ class FFmpegVideoDecoderTest : public testing::Test {
EXPECT_CALL(statistics_callback_, OnStatistics(_))
.Times(2);
- CallProduceVideoFrame(&video_frame_a);
- CallProduceVideoFrame(&video_frame_b);
+ Read(&video_frame_a);
+ Read(&video_frame_b);
- size_t expected_width = static_cast<size_t>(kVisibleRect.width());
- size_t expected_height = static_cast<size_t>(kVisibleRect.height());
+ size_t original_width = static_cast<size_t>(kVisibleRect.width());
+ size_t original_height = static_cast<size_t>(kVisibleRect.height());
ASSERT_TRUE(video_frame_a);
ASSERT_TRUE(video_frame_b);
- EXPECT_EQ(expected_width, video_frame_a->width());
- EXPECT_EQ(expected_height, video_frame_a->height());
+ EXPECT_EQ(original_width, video_frame_a->width());
+ EXPECT_EQ(original_height, video_frame_a->height());
EXPECT_EQ(expected_width, video_frame_b->width());
EXPECT_EQ(expected_height, video_frame_b->height());
}
- void CallProduceVideoFrame(scoped_refptr<VideoFrame>* video_frame) {
- EXPECT_CALL(*this, ConsumeVideoFrame(_))
+ void Read(scoped_refptr<VideoFrame>* video_frame) {
+ EXPECT_CALL(*this, FrameReady(_))
.WillOnce(SaveArg<0>(video_frame));
- decoder_->ProduceVideoFrame(VideoFrame::CreateFrame(
- VideoFrame::YV12, kVisibleRect.width(), kVisibleRect.height(),
- kNoTimestamp, kNoTimestamp));
+ decoder_->Read(read_cb_);
message_loop_.RunAllPending();
}
@@ -227,7 +196,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
int64 PopTimestamp() {
scoped_refptr<VideoFrame> video_frame;
- CallProduceVideoFrame(&video_frame);
+ Read(&video_frame);
return video_frame->GetTimestamp().InMicroseconds();
}
@@ -244,7 +213,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
read_callback.Run(i_frame_buffer_);
}
- MOCK_METHOD1(ConsumeVideoFrame, void(scoped_refptr<VideoFrame>));
+ MOCK_METHOD1(FrameReady, void(scoped_refptr<VideoFrame>));
MessageLoop message_loop_;
scoped_refptr<FFmpegVideoDecoder> decoder_;
@@ -253,6 +222,8 @@ class FFmpegVideoDecoderTest : public testing::Test {
StrictMock<MockFilterHost> host_;
VideoDecoderConfig config_;
+ VideoDecoder::ReadCB read_cb_;
+
// Various buffers for testing.
scoped_array<uint8_t> frame_buffer_;
scoped_refptr<Buffer> end_of_stream_buffer_;
@@ -270,7 +241,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_Normal) {
Initialize();
}
-TEST_F(FFmpegVideoDecoderTest, Initialize_FindDecoderFails) {
+TEST_F(FFmpegVideoDecoderTest, Initialize_UnsupportedDecoder) {
// Test avcodec_find_decoder() returning NULL.
VideoDecoderConfig config(kUnknownVideoCodec, kVideoFormat,
kCodedSize, kVisibleRect,
@@ -282,6 +253,18 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_FindDecoderFails) {
InitializeWithConfig(config);
}
+TEST_F(FFmpegVideoDecoderTest, Initialize_UnsupportedPixelFormat) {
+ // Ensure decoder handles unsupported pixel formats without crashing.
+ VideoDecoderConfig config(kCodecVP8, VideoFrame::INVALID,
+ kCodedSize, kVisibleRect,
+ kFrameRate.num, kFrameRate.den,
+ kAspectRatio.num, kAspectRatio.den,
+ NULL, 0);
+
+ EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE));
+ InitializeWithConfig(config);
+}
+
TEST_F(FFmpegVideoDecoderTest, Initialize_OpenDecoderFails) {
// Specify Theora w/o extra data so that avcodec_open() fails.
VideoDecoderConfig config(kCodecTheora, kVideoFormat,
@@ -323,11 +306,11 @@ TEST_F(FFmpegVideoDecoderTest, DecodeFrame_0ByteFrame) {
.WillRepeatedly(ReturnBuffer(end_of_stream_buffer_));
EXPECT_CALL(statistics_callback_, OnStatistics(_))
- .Times(3);
+ .Times(2);
- CallProduceVideoFrame(&video_frame_a);
- CallProduceVideoFrame(&video_frame_b);
- CallProduceVideoFrame(&video_frame_c);
+ Read(&video_frame_a);
+ Read(&video_frame_b);
+ Read(&video_frame_c);
ASSERT_TRUE(video_frame_a);
ASSERT_TRUE(video_frame_b);
@@ -345,11 +328,20 @@ TEST_F(FFmpegVideoDecoderTest, DecodeFrame_DecodeError) {
.WillOnce(ReturnBuffer(corrupt_i_frame_buffer_))
.WillRepeatedly(ReturnBuffer(i_frame_buffer_));
+ // The error is only raised on the second decode attempt, so we expect at
+ // least one successful decode but we don't expect FrameReady() to be
+ // executed as an error is raised instead.
+ EXPECT_CALL(statistics_callback_, OnStatistics(_));
+ EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE));
+
+ // Our read should still get satisfied with end of stream frame during an
+ // error.
scoped_refptr<VideoFrame> video_frame;
- CallProduceVideoFrame(&video_frame);
+ Read(&video_frame);
+ ASSERT_TRUE(video_frame);
+ EXPECT_TRUE(video_frame->IsEndOfStream());
- // XXX: SERIOUSLY? This seems broken to call NULL on decoder error.
- EXPECT_FALSE(video_frame);
+ message_loop_.RunAllPending();
}
// Multi-threaded decoders have different behavior than single-threaded
@@ -368,29 +360,31 @@ TEST_F(FFmpegVideoDecoderTest, DecodeFrame_DecodeErrorAtEndOfStream) {
}
// Decode |i_frame_buffer_| and then a frame with a larger width and verify
-// the output size didn't change.
-// TODO(acolwell): Fix InvalidRead detected by Valgrind
-//TEST_F(FFmpegVideoDecoderTest, DecodeFrame_LargerWidth) {
-// DecodeIFrameThenTestFile("vp8-I-frame-640x240");
-//}
+// the output size was adjusted.
+//
+// TODO(acolwell): Fix InvalidRead detected by Valgrind http://crbug.com/102789
+TEST_F(FFmpegVideoDecoderTest, DISABLED_DecodeFrame_LargerWidth) {
+ DecodeIFrameThenTestFile("vp8-I-frame-640x240", 640, 240);
+}
// Decode |i_frame_buffer_| and then a frame with a smaller width and verify
-// the output size didn't change.
+// the output size was adjusted.
TEST_F(FFmpegVideoDecoderTest, DecodeFrame_SmallerWidth) {
- DecodeIFrameThenTestFile("vp8-I-frame-160x240");
+ DecodeIFrameThenTestFile("vp8-I-frame-160x240", 160, 240);
}
// Decode |i_frame_buffer_| and then a frame with a larger height and verify
-// the output size didn't change.
-// TODO(acolwell): Fix InvalidRead detected by Valgrind
-//TEST_F(FFmpegVideoDecoderTest, DecodeFrame_LargerHeight) {
-// DecodeIFrameThenTestFile("vp8-I-frame-320x480");
-//}
+// the output size was adjusted.
+//
+// TODO(acolwell): Fix InvalidRead detected by Valgrind http://crbug.com/102789
+TEST_F(FFmpegVideoDecoderTest, DISABLED_DecodeFrame_LargerHeight) {
+ DecodeIFrameThenTestFile("vp8-I-frame-320x480", 320, 480);
+}
// Decode |i_frame_buffer_| and then a frame with a smaller height and verify
-// the output size didn't change.
+// the output size was adjusted.
TEST_F(FFmpegVideoDecoderTest, DecodeFrame_SmallerHeight) {
- DecodeIFrameThenTestFile("vp8-I-frame-320x120");
+ DecodeIFrameThenTestFile("vp8-I-frame-320x120", 320, 120);
}
// Test pausing when decoder has initialized but not decoded.
@@ -428,9 +422,7 @@ TEST_F(FFmpegVideoDecoderTest, Flush_Decoding) {
}
// Test flushing when decoder has hit end of stream.
-//
-// TODO(scherkus): test is disabled until we clean up buffer recycling.
-TEST_F(FFmpegVideoDecoderTest, DISABLED_Flush_EndOfStream) {
+TEST_F(FFmpegVideoDecoderTest, Flush_EndOfStream) {
Initialize();
EnterDecodingState();
EnterEndOfStreamState();
@@ -440,7 +432,6 @@ TEST_F(FFmpegVideoDecoderTest, DISABLED_Flush_EndOfStream) {
// Test seeking when decoder has initialized but not decoded.
TEST_F(FFmpegVideoDecoderTest, Seek_Initialized) {
Initialize();
- ExpectSeekPreroll();
Seek(1000);
}
@@ -448,7 +439,6 @@ TEST_F(FFmpegVideoDecoderTest, Seek_Initialized) {
TEST_F(FFmpegVideoDecoderTest, Seek_Decoding) {
Initialize();
EnterDecodingState();
- ExpectSeekPreroll();
Seek(1000);
}
@@ -457,7 +447,6 @@ TEST_F(FFmpegVideoDecoderTest, Seek_EndOfStream) {
Initialize();
EnterDecodingState();
EnterEndOfStreamState();
- ExpectSeekPrerollEndOfStream();
Seek(1000);
}
diff --git a/media/filters/video_renderer_base.cc b/media/filters/video_renderer_base.cc
index 44ddd76..2804c80 100644
--- a/media/filters/video_renderer_base.cc
+++ b/media/filters/video_renderer_base.cc
@@ -30,10 +30,12 @@ VideoRendererBase::VideoRendererBase()
: frame_available_(&lock_),
state_(kUninitialized),
thread_(base::kNullThreadHandle),
- pending_reads_(0),
+ pending_read_(false),
pending_paint_(false),
pending_paint_with_last_available_(false),
- playback_rate_(0) {
+ playback_rate_(0),
+ read_cb_(base::Bind(&VideoRendererBase::FrameReady,
+ base::Unretained(this))) {
}
VideoRendererBase::~VideoRendererBase() {
@@ -61,30 +63,29 @@ void VideoRendererBase::Flush(const base::Closure& callback) {
flush_callback_ = callback;
state_ = kFlushing;
- if (!pending_paint_)
- FlushBuffers_Locked();
+ AttemptFlush_Locked();
}
void VideoRendererBase::Stop(const base::Closure& callback) {
- base::PlatformThreadHandle old_thread_handle = base::kNullThreadHandle;
+ base::PlatformThreadHandle thread_to_join = base::kNullThreadHandle;
{
base::AutoLock auto_lock(lock_);
state_ = kStopped;
if (!pending_paint_ && !pending_paint_with_last_available_)
- DoStopOrErrorFlush_Locked();
+ DoStopOrError_Locked();
// Clean up our thread if present.
- if (thread_) {
+ if (thread_ != base::kNullThreadHandle) {
// Signal the thread since it's possible to get stopped with the video
// thread waiting for a read to complete.
frame_available_.Signal();
- old_thread_handle = thread_;
+ thread_to_join = thread_;
thread_ = base::kNullThreadHandle;
}
}
- if (old_thread_handle)
- base::PlatformThread::Join(old_thread_handle);
+ if (thread_to_join != base::kNullThreadHandle)
+ base::PlatformThread::Join(thread_to_join);
// Signal the subclass we're stopping.
OnStop(callback);
@@ -96,35 +97,17 @@ void VideoRendererBase::SetPlaybackRate(float playback_rate) {
}
void VideoRendererBase::Seek(base::TimeDelta time, const FilterStatusCB& cb) {
- bool run_callback = false;
-
{
base::AutoLock auto_lock(lock_);
- // There is a race condition between filters to receive SeekTask().
- // It turns out we could receive buffer from decoder before seek()
- // is called on us. so we do the following:
- // kFlushed => ( Receive first buffer or Seek() ) => kSeeking and
- // kSeeking => ( Receive enough buffers) => kPrerolled. )
- DCHECK(state_ == kPrerolled || state_ == kFlushed || state_ == kSeeking);
+ DCHECK_EQ(state_, kFlushed) << "Must flush prior to seeking.";
DCHECK(!cb.is_null());
DCHECK(seek_cb_.is_null());
- if (state_ == kPrerolled) {
- // Already get enough buffers from decoder.
- run_callback = true;
- } else {
- // Otherwise we are either kFlushed or kSeeking, but without enough
- // buffers we should save the callback function and call it later.
- state_ = kSeeking;
- seek_cb_ = cb;
- }
-
+ state_ = kSeeking;
+ seek_cb_ = cb;
seek_timestamp_ = time;
- ScheduleRead_Locked();
+ AttemptRead_Locked();
}
-
- if (run_callback)
- cb.Run(PIPELINE_OK);
}
void VideoRendererBase::Initialize(VideoDecoder* decoder,
@@ -139,10 +122,6 @@ void VideoRendererBase::Initialize(VideoDecoder* decoder,
statistics_callback_ = stats_callback;
- decoder_->set_consume_video_frame_callback(
- base::Bind(&VideoRendererBase::ConsumeVideoFrame,
- base::Unretained(this)));
-
// Notify the pipeline of the video dimensions.
host()->SetNaturalVideoSize(decoder_->natural_size());
@@ -150,7 +129,8 @@ void VideoRendererBase::Initialize(VideoDecoder* decoder,
// TODO(scherkus): do we trust subclasses not to do something silly while
// we're holding the lock?
if (!OnInitialize(decoder)) {
- EnterErrorState_Locked(PIPELINE_ERROR_INITIALIZATION_FAILED);
+ state_ = kError;
+ host()->SetError(PIPELINE_ERROR_INITIALIZATION_FAILED);
callback.Run();
return;
}
@@ -164,7 +144,8 @@ void VideoRendererBase::Initialize(VideoDecoder* decoder,
// Create our video thread.
if (!base::PlatformThread::Create(0, this, &thread_)) {
NOTREACHED() << "Video thread creation failed";
- EnterErrorState_Locked(PIPELINE_ERROR_INITIALIZATION_FAILED);
+ state_ = kError;
+ host()->SetError(PIPELINE_ERROR_INITIALIZATION_FAILED);
callback.Run();
return;
}
@@ -269,7 +250,7 @@ void VideoRendererBase::ThreadMain() {
// If we got here then:
// 1. next frame's timestamp is already current; or
- // 2. we do not have any current frame yet anyway; or
+ // 2. we do not have a current frame yet; or
// 3. a special case when the stream is badly formatted and
// we got a frame with timestamp greater than overall duration.
// In this case we should proceed anyway and try to obtain the
@@ -290,11 +271,10 @@ void VideoRendererBase::ThreadMain() {
if (remaining_time.InMicroseconds() > 0)
break;
- // Frame dropped: transfer ready frame into done queue and read again.
- frames_queue_done_.push_back(frames_queue_ready_.front());
- frames_queue_ready_.pop_front();
- ScheduleRead_Locked();
+ // Frame dropped: read again.
++frames_dropped;
+ frames_queue_ready_.pop_front();
+ AttemptRead_Locked();
}
// Continue waiting for the current paint to finish.
@@ -307,10 +287,9 @@ void VideoRendererBase::ThreadMain() {
// signal to the client that a new frame is available.
DCHECK(!pending_paint_);
DCHECK(!frames_queue_ready_.empty());
- frames_queue_done_.push_back(current_frame_);
current_frame_ = frames_queue_ready_.front();
frames_queue_ready_.pop_front();
- ScheduleRead_Locked();
+ AttemptRead_Locked();
base::AutoUnlock auto_unlock(lock_);
OnFrameAvailable();
@@ -322,8 +301,7 @@ void VideoRendererBase::GetCurrentFrame(scoped_refptr<VideoFrame>* frame_out) {
DCHECK(!pending_paint_ && !pending_paint_with_last_available_);
if ((!current_frame_ || current_frame_->IsEndOfStream()) &&
- (!last_available_frame_ ||
- last_available_frame_->IsEndOfStream())) {
+ (!last_available_frame_ || last_available_frame_->IsEndOfStream())) {
*frame_out = NULL;
return;
}
@@ -366,161 +344,104 @@ void VideoRendererBase::PutCurrentFrame(scoped_refptr<VideoFrame> frame) {
// frame when this is true.
frame_available_.Signal();
if (state_ == kFlushing) {
- FlushBuffers_Locked();
+ AttemptFlush_Locked();
} else if (state_ == kError || state_ == kStopped) {
- DoStopOrErrorFlush_Locked();
+ DoStopOrError_Locked();
}
}
-void VideoRendererBase::ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
- if (frame) {
- PipelineStatistics statistics;
- statistics.video_frames_decoded = 1;
- statistics_callback_.Run(statistics);
- }
+void VideoRendererBase::FrameReady(scoped_refptr<VideoFrame> frame) {
+ DCHECK(frame);
base::AutoLock auto_lock(lock_);
-
- if (!frame) {
- EnterErrorState_Locked(PIPELINE_ERROR_DECODE);
- return;
- }
-
- // Decoder could reach seek state before our Seek() get called.
- // We will enter kSeeking
- if (state_ == kFlushed)
- state_ = kSeeking;
-
- // Synchronous flush between filters should prevent this from happening.
- DCHECK_NE(state_, kStopped);
- if (frame && !frame->IsEndOfStream())
- --pending_reads_;
-
DCHECK_NE(state_, kUninitialized);
DCHECK_NE(state_, kStopped);
DCHECK_NE(state_, kError);
+ DCHECK_NE(state_, kFlushed);
+ CHECK(pending_read_);
- if (state_ == kPaused || state_ == kFlushing) {
- // Decoder are flushing rubbish video frame, we will not display them.
- if (frame && !frame->IsEndOfStream())
- frames_queue_done_.push_back(frame);
- DCHECK_LE(frames_queue_done_.size(),
- static_cast<size_t>(Limits::kMaxVideoFrames));
-
- // Excluding kPause here, because in pause state, we will never
- // transfer out-bounding buffer. We do not flush buffer when Compositor
- // hold reference to our video frame either.
- if (state_ == kFlushing && pending_paint_ == false)
- FlushBuffers_Locked();
+ pending_read_ = false;
+ if (state_ == kFlushing) {
+ AttemptFlush_Locked();
return;
}
// Discard frames until we reach our desired seek timestamp.
if (state_ == kSeeking && !frame->IsEndOfStream() &&
(frame->GetTimestamp() + frame->GetDuration()) <= seek_timestamp_) {
- frames_queue_done_.push_back(frame);
- ScheduleRead_Locked();
- } else {
- frames_queue_ready_.push_back(frame);
- DCHECK_LE(frames_queue_ready_.size(),
- static_cast<size_t>(Limits::kMaxVideoFrames));
- frame_available_.Signal();
+ AttemptRead_Locked();
+ return;
}
- // Check for our preroll complete condition.
- bool new_frame_available = false;
- if (state_ == kSeeking) {
- if (frames_queue_ready_.size() == Limits::kMaxVideoFrames ||
- frame->IsEndOfStream()) {
- // We're paused, so make sure we update |current_frame_| to represent
- // our new location.
- state_ = kPrerolled;
-
- // Because we might remain paused (i.e., we were not playing before we
- // received a seek), we can't rely on ThreadMain() to notify the subclass
- // the frame has been updated.
- scoped_refptr<VideoFrame> first_frame;
- first_frame = frames_queue_ready_.front();
- if (!first_frame->IsEndOfStream()) {
- frames_queue_ready_.pop_front();
- current_frame_ = first_frame;
- }
- new_frame_available = true;
+ // This one's a keeper! Place it in the ready queue.
+ frames_queue_ready_.push_back(frame);
+ DCHECK_LE(frames_queue_ready_.size(),
+ static_cast<size_t>(Limits::kMaxVideoFrames));
+ frame_available_.Signal();
- // If we reach prerolled state before Seek() is called by pipeline,
- // |seek_callback_| is not set, we will return immediately during
- // when Seek() is eventually called.
- if (!seek_cb_.is_null()) {
- ResetAndRunCB(&seek_cb_, PIPELINE_OK);
- }
- }
- } else if (state_ == kFlushing && pending_reads_ == 0 && !pending_paint_) {
- OnFlushDone_Locked();
+ PipelineStatistics statistics;
+ statistics.video_frames_decoded = 1;
+ statistics_callback_.Run(statistics);
+
+ // Always request more decoded video if we have capacity. This serves two
+ // purposes:
+ // 1) Prerolling while paused
+ // 2) Keeps decoding going if video rendering thread starts falling behind
+ if (frames_queue_ready_.size() < Limits::kMaxVideoFrames &&
+ !frame->IsEndOfStream()) {
+ AttemptRead_Locked();
+ return;
}
- if (new_frame_available) {
- base::AutoUnlock auto_unlock(lock_);
- OnFrameAvailable();
- }
-}
+ // If we're at capacity or end of stream while seeking we need to transition
+ // to prerolled.
+ if (state_ == kSeeking) {
+ state_ = kPrerolled;
+
+ // Because we might remain in the prerolled state for an undetermined amount
+ // of time (i.e., we were not playing before we received a seek), we'll
+ // manually update the current frame and notify the subclass below.
+ if (!frames_queue_ready_.front()->IsEndOfStream()) {
+ current_frame_ = frames_queue_ready_.front();
+ frames_queue_ready_.pop_front();
+ }
-void VideoRendererBase::ReadInput(scoped_refptr<VideoFrame> frame) {
- // We should never return empty frames or EOS frame.
- DCHECK(frame && !frame->IsEndOfStream());
+ // ...and we're done seeking!
+ DCHECK(!seek_cb_.is_null());
+ ResetAndRunCB(&seek_cb_, PIPELINE_OK);
- decoder_->ProduceVideoFrame(frame);
- ++pending_reads_;
+ base::AutoUnlock ul(lock_);
+ OnFrameAvailable();
+ }
}
-void VideoRendererBase::ScheduleRead_Locked() {
+void VideoRendererBase::AttemptRead_Locked() {
lock_.AssertAcquired();
DCHECK_NE(kEnded, state_);
- // TODO(jiesun): We use dummy buffer to feed decoder to let decoder to
- // provide buffer pools. In the future, we may want to implement real
- // buffer pool to recycle buffers.
- while (!frames_queue_done_.empty()) {
- scoped_refptr<VideoFrame> video_frame = frames_queue_done_.front();
- frames_queue_done_.pop_front();
- ReadInput(video_frame);
+
+ if (pending_read_ || frames_queue_ready_.size() == Limits::kMaxVideoFrames) {
+ return;
}
+
+ pending_read_ = true;
+ decoder_->Read(read_cb_);
}
-void VideoRendererBase::FlushBuffers_Locked() {
+void VideoRendererBase::AttemptFlush_Locked() {
lock_.AssertAcquired();
- DCHECK(!pending_paint_);
+ DCHECK_EQ(kFlushing, state_);
- // We should never put EOF frame into "done queue".
+ // Get rid of any ready frames.
while (!frames_queue_ready_.empty()) {
- scoped_refptr<VideoFrame> video_frame = frames_queue_ready_.front();
- if (!video_frame->IsEndOfStream()) {
- frames_queue_done_.push_back(video_frame);
- }
frames_queue_ready_.pop_front();
}
- if (current_frame_ && !current_frame_->IsEndOfStream()) {
- frames_queue_done_.push_back(current_frame_);
- }
- current_frame_ = NULL;
-
- // Flush all buffers out to decoder.
- ScheduleRead_Locked();
-
- if (pending_reads_ == 0 && state_ == kFlushing)
- OnFlushDone_Locked();
-}
-
-void VideoRendererBase::OnFlushDone_Locked() {
- lock_.AssertAcquired();
- // Check all buffers are returned to owners.
- DCHECK_EQ(frames_queue_done_.size(), 0u);
- DCHECK(!current_frame_);
- DCHECK(frames_queue_ready_.empty());
- if (!flush_callback_.is_null()) // This ensures callback is invoked once.
+ if (!pending_paint_ && !pending_read_) {
+ state_ = kFlushed;
+ current_frame_ = NULL;
ResetAndRunCB(&flush_callback_);
-
- state_ = kFlushed;
+ }
}
base::TimeDelta VideoRendererBase::CalculateSleepDuration(
@@ -552,59 +473,12 @@ base::TimeDelta VideoRendererBase::CalculateSleepDuration(
static_cast<int64>(sleep.InMicroseconds() / playback_rate));
}
-void VideoRendererBase::EnterErrorState_Locked(PipelineStatus status) {
- lock_.AssertAcquired();
-
- base::Closure callback;
- State old_state = state_;
- state_ = kError;
-
- // Flush frames if we aren't in the middle of a paint. If we
- // are painting then flushing will happen when the paint completes.
- if (!pending_paint_ && !pending_paint_with_last_available_)
- DoStopOrErrorFlush_Locked();
-
- switch (old_state) {
- case kUninitialized:
- case kPrerolled:
- case kPaused:
- case kFlushed:
- case kEnded:
- case kPlaying:
- break;
-
- case kFlushing:
- CHECK(!flush_callback_.is_null());
- std::swap(callback, flush_callback_);
- break;
-
- case kSeeking:
- CHECK(!seek_cb_.is_null());
- ResetAndRunCB(&seek_cb_, status);
- return;
- break;
-
- case kStopped:
- NOTREACHED() << "Should not error if stopped.";
- return;
-
- case kError:
- return;
- }
-
- host()->SetError(status);
-
- if (!callback.is_null())
- callback.Run();
-}
-
-void VideoRendererBase::DoStopOrErrorFlush_Locked() {
+void VideoRendererBase::DoStopOrError_Locked() {
DCHECK(!pending_paint_);
DCHECK(!pending_paint_with_last_available_);
lock_.AssertAcquired();
- FlushBuffers_Locked();
last_available_frame_ = NULL;
- DCHECK_EQ(pending_reads_, 0);
+ DCHECK(!pending_read_);
}
} // namespace media
diff --git a/media/filters/video_renderer_base.h b/media/filters/video_renderer_base.h
index 2125e45..c604f08 100644
--- a/media/filters/video_renderer_base.h
+++ b/media/filters/video_renderer_base.h
@@ -88,25 +88,16 @@ class MEDIA_EXPORT VideoRendererBase
// class executes on.
virtual void OnFrameAvailable() = 0;
- void ReadInput(scoped_refptr<VideoFrame> frame);
-
private:
- // Callback from video decoder to deliver decoded video frames and decrements
- // |pending_reads_|.
- void ConsumeVideoFrame(scoped_refptr<VideoFrame> frame);
-
- // Helper method that schedules an asynchronous read from the decoder and
- // increments |pending_reads_|.
- //
- // Safe to call from any thread.
- void ScheduleRead_Locked();
+ // Callback from the video decoder delivering decoded video frames.
+ void FrameReady(scoped_refptr<VideoFrame> frame);
- // Helper function to finished "flush" operation
- void OnFlushDone_Locked();
+ // Helper method that schedules an asynchronous read from the decoder as long
+ // as there isn't a pending read and we have capacity.
+ void AttemptRead_Locked();
- // Helper method that flushes all video frame in "ready queue" including
- // current frame into "done queue".
- void FlushBuffers_Locked();
+ // Attempts to complete flushing and transition into the flushed state.
+ void AttemptFlush_Locked();
// Calculates the duration to sleep for based on |current_frame_|'s timestamp,
// the next frame timestamp (may be NULL), and the provided playback rate.
@@ -119,7 +110,7 @@ class MEDIA_EXPORT VideoRendererBase
void EnterErrorState_Locked(PipelineStatus status);
// Helper function that flushes the buffers when a Stop() or error occurs.
- void DoStopOrErrorFlush_Locked();
+ void DoStopOrError_Locked();
// Used for accessing data members.
base::Lock lock_;
@@ -128,10 +119,20 @@ class MEDIA_EXPORT VideoRendererBase
// Queue of incoming frames as well as the current frame since the last time
// OnFrameAvailable() was called.
- typedef std::deque< scoped_refptr<VideoFrame> > VideoFrameQueue;
+ typedef std::deque<scoped_refptr<VideoFrame> > VideoFrameQueue;
VideoFrameQueue frames_queue_ready_;
- VideoFrameQueue frames_queue_done_;
+
+ // The current frame available to subclasses for rendering via
+ // GetCurrentFrame(). |current_frame_| can only be altered when
+ // |pending_paint_| is false.
scoped_refptr<VideoFrame> current_frame_;
+
+ // The previous |current_frame_|, which is returned via GetCurrentFrame() in the
+ // situation where all frames were deallocated (i.e., during a flush).
+ //
+ // TODO(scherkus): remove this after getting rid of Get/PutCurrentFrame() in
+ // favour of passing ownership of the current frame to the renderer via
+ // callback.
scoped_refptr<VideoFrame> last_available_frame_;
// Used to signal |thread_| as frames are added to |frames_|. Rule of thumb:
@@ -182,12 +183,16 @@ class MEDIA_EXPORT VideoRendererBase
// Previous time returned from the pipeline.
base::TimeDelta previous_time_;
- // Keeps track of our pending buffers. We *must* have no pending reads
- // before executing the flush callback; We decrement it each time we receive
- // a buffer and increment it each time we send a buffer out. therefore if
- // decoder provides buffer, |pending_reads_| is always non-positive and if
- // renderer provides buffer, |pending_reads_| is always non-negative.
- int pending_reads_;
+ // Keep track of various pending operations:
+ // - |pending_read_| is true when there's an active video decoding request.
+ // - |pending_paint_| is true when |current_frame_| is currently being
+ // accessed by the subclass.
+ // - |pending_paint_with_last_available_| is true when
+ // |last_available_frame_| is currently being accessed by the subclass.
+ //
+ // Flushing cannot complete until both |pending_read_| and |pending_paint_|
+ // are false.
+ bool pending_read_;
bool pending_paint_;
bool pending_paint_with_last_available_;
@@ -200,6 +205,8 @@ class MEDIA_EXPORT VideoRendererBase
base::TimeDelta seek_timestamp_;
+ VideoDecoder::ReadCB read_cb_;
+
DISALLOW_COPY_AND_ASSIGN(VideoRendererBase);
};
diff --git a/media/filters/video_renderer_base_unittest.cc b/media/filters/video_renderer_base_unittest.cc
index f7a6490..01ebb53 100644
--- a/media/filters/video_renderer_base_unittest.cc
+++ b/media/filters/video_renderer_base_unittest.cc
@@ -60,11 +60,11 @@ class VideoRendererBaseTest : public ::testing::Test {
decoder_(new MockVideoDecoder()),
cv_(&lock_),
event_(false, false),
- seeking_(false),
- pending_reads_(0) {
+ seeking_(false) {
renderer_->set_host(&host_);
- EXPECT_CALL(*decoder_, natural_size()).WillRepeatedly(Return(kNaturalSize));
+ EXPECT_CALL(*decoder_, natural_size())
+ .WillRepeatedly(ReturnRef(kNaturalSize));
EXPECT_CALL(stats_callback_object_, OnStatistics(_))
.Times(AnyNumber());
@@ -90,7 +90,7 @@ class VideoRendererBaseTest : public ::testing::Test {
.WillRepeatedly(Return(base::TimeDelta()));
// Monitor reads from the decoder.
- EXPECT_CALL(*decoder_, ProduceVideoFrame(_))
+ EXPECT_CALL(*decoder_, Read(_))
.WillRepeatedly(Invoke(this, &VideoRendererBaseTest::FrameRequested));
InSequence s;
@@ -110,7 +110,7 @@ class VideoRendererBaseTest : public ::testing::Test {
Seek(0);
}
- void StartSeeking(int64 timestamp, PipelineStatus expected_status) {
+ void StartSeeking(int64 timestamp) {
EXPECT_FALSE(seeking_);
// Seek to trigger prerolling.
@@ -118,7 +118,7 @@ class VideoRendererBaseTest : public ::testing::Test {
renderer_->Seek(base::TimeDelta::FromMicroseconds(timestamp),
base::Bind(&VideoRendererBaseTest::OnSeekComplete,
base::Unretained(this),
- expected_status));
+ PIPELINE_OK));
}
void Play() {
@@ -129,9 +129,7 @@ class VideoRendererBaseTest : public ::testing::Test {
void Seek(int64 timestamp) {
SCOPED_TRACE(base::StringPrintf("Seek(%" PRId64 ")", timestamp));
- StartSeeking(timestamp, PIPELINE_OK);
-
- // TODO(scherkus): switch to FinishSeeking_DISABLED() (see comments below).
+ StartSeeking(timestamp);
FinishSeeking();
}
@@ -194,19 +192,14 @@ class VideoRendererBaseTest : public ::testing::Test {
event_.Reset();
}
- // Delivers a NULL frame to VideoRendererBase, signalling a decode error.
- void DeliverErrorToRenderer() {
- decoder_->VideoFrameReadyForTest(NULL);
- }
-
- // Delivers a frame with given timestamp and duration to VideoRendererBase.
- void DeliverFrameToRenderer(int64 timestamp, int64 duration) {
+ // Creates a frame with given timestamp and duration.
+ scoped_refptr<VideoFrame> CreateFrame(int64 timestamp, int64 duration) {
scoped_refptr<VideoFrame> frame =
VideoFrame::CreateFrame(VideoFrame::RGB32, kNaturalSize.width(),
kNaturalSize.height(),
base::TimeDelta::FromMicroseconds(timestamp),
base::TimeDelta::FromMicroseconds(duration));
- decoder_->VideoFrameReadyForTest(frame);
+ return frame;
}
protected:
@@ -229,9 +222,10 @@ class VideoRendererBaseTest : public ::testing::Test {
private:
// Called by VideoRendererBase when it wants a frame.
- void FrameRequested(scoped_refptr<VideoFrame> video_frame) {
+ void FrameRequested(const VideoDecoder::ReadCB& callback) {
base::AutoLock l(lock_);
- ++pending_reads_;
+ CHECK(read_cb_.is_null());
+ read_cb_ = callback;
cv_.Signal();
}
@@ -243,33 +237,10 @@ class VideoRendererBaseTest : public ::testing::Test {
cv_.Signal();
}
- // TODO(scherkus): remove this as soon as we move away from our current buffer
- // recycling implementation, which assumes the decoder will continously
- // deliver frames during prerolling -- even if VideoRendererBase didn't ask
- // for them!
void FinishSeeking() {
EXPECT_CALL(*renderer_, OnFrameAvailable());
EXPECT_TRUE(seeking_);
- // Satisfy the read requests. The callback must be executed in order
- // to exit the loop since VideoRendererBase can read a few extra frames
- // after |timestamp| in order to preroll.
- int64 i = 0;
- base::AutoLock l(lock_);
- while (seeking_) {
- // Unlock to deliver the frame to avoid re-entrancy issues.
- base::AutoUnlock ul(lock_);
- DeliverFrameToRenderer(i * kDuration, kDuration);
- ++i;
- }
- }
-
- // TODO(scherkus): this is the proper read/prerolling behaviour we want to
- // have where VideoRendererBase requests a frame and the decoder responds.
- void FinishSeeking_DISABLED() {
- EXPECT_CALL(*renderer_, OnFrameAvailable());
- EXPECT_TRUE(seeking_);
-
base::TimeDelta timeout =
base::TimeDelta::FromMilliseconds(TestTimeouts::action_timeout_ms());
@@ -279,22 +250,23 @@ class VideoRendererBaseTest : public ::testing::Test {
int64 i = 0;
base::AutoLock l(lock_);
while (seeking_) {
- if (pending_reads_ > 0) {
- --pending_reads_;
+ if (!read_cb_.is_null()) {
+ VideoDecoder::ReadCB read_cb;
+ std::swap(read_cb, read_cb_);
// Unlock to deliver the frame to avoid re-entrancy issues.
base::AutoUnlock ul(lock_);
- DeliverFrameToRenderer(i * kDuration, kDuration);
+ read_cb.Run(CreateFrame(i * kDuration, kDuration));
++i;
- } else if (pending_reads_ == 0) {
- // We want to wait iff we're still seeking but have no pending reads.
+ } else {
+ // We want to wait iff we're still seeking but have no pending read.
cv_.TimedWait(timeout);
- CHECK(!seeking_ || pending_reads_ > 0)
+ CHECK(!seeking_ || !read_cb_.is_null())
<< "Timed out waiting for seek or read to occur.";
}
}
- EXPECT_EQ(0, pending_reads_);
+ EXPECT_TRUE(read_cb_.is_null());
}
base::Lock lock_;
@@ -303,7 +275,7 @@ class VideoRendererBaseTest : public ::testing::Test {
// Used in conjunction with |lock_| and |cv_| for satisfying reads.
bool seeking_;
- int pending_reads_;
+ VideoDecoder::ReadCB read_cb_;
DISALLOW_COPY_AND_ASSIGN(VideoRendererBaseTest);
};
@@ -344,43 +316,6 @@ TEST_F(VideoRendererBaseTest, Play) {
Shutdown();
}
-TEST_F(VideoRendererBaseTest, Error_Playing) {
- Initialize();
- Play();
-
- EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE));
- DeliverErrorToRenderer();
- Shutdown();
-}
-
-TEST_F(VideoRendererBaseTest, Error_Seeking) {
- Initialize();
- Pause();
- Flush();
-
- StartSeeking(0, PIPELINE_ERROR_DECODE);
- DeliverErrorToRenderer();
- Shutdown();
-}
-
-TEST_F(VideoRendererBaseTest, Error_DuringPaint) {
- Initialize();
- Play();
-
- // Grab the frame.
- scoped_refptr<VideoFrame> frame;
- renderer_->GetCurrentFrame(&frame);
- EXPECT_TRUE(frame);
-
- // Deliver an error.
- EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE));
- DeliverErrorToRenderer();
-
- // Return the frame then try getting it again -- it should be NULL.
- renderer_->PutCurrentFrame(frame);
- ExpectCurrentFrame(false);
-}
-
TEST_F(VideoRendererBaseTest, Seek_Exact) {
Initialize();
Pause();
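The rewritten fixture above hands the renderer's frame request to the test as a VideoDecoder::ReadCB, which FinishSeeking() then satisfies outside the lock. A minimal sketch of that same hand-off pattern, written with standard C++ primitives instead of base::Lock/ConditionVariable (FakeDecoder, Read() and SatisfyPendingRead() are hypothetical names for illustration, not part of this patch):

#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <utility>

struct Frame { long long timestamp_us = 0; long long duration_us = 0; };
using ReadCB = std::function<void(const Frame&)>;

class FakeDecoder {
 public:
  // Called by the component under test when it wants a frame; only one read
  // may be in flight at a time, mirroring the VideoDecoder contract.
  void Read(ReadCB callback) {
    std::lock_guard<std::mutex> l(lock_);
    read_cb_ = std::move(callback);
    cv_.notify_one();
  }

  // Called by the test: wait briefly for a pending read, then run the
  // callback outside the lock to avoid re-entrancy, as FinishSeeking() does.
  bool SatisfyPendingRead(const Frame& frame) {
    ReadCB cb;
    {
      std::unique_lock<std::mutex> l(lock_);
      if (!cv_.wait_for(l, std::chrono::milliseconds(100),
                        [this] { return static_cast<bool>(read_cb_); })) {
        return false;  // Timed out waiting for a read request.
      }
      std::swap(cb, read_cb_);
    }
    cb(frame);
    return true;
  }

 private:
  std::mutex lock_;
  std::condition_variable cv_;
  ReadCB read_cb_;
};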
diff --git a/media/media.gyp b/media/media.gyp
index 8226238..747c3a9 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -597,7 +597,6 @@
'filters/file_data_source_unittest.cc',
'filters/video_renderer_base_unittest.cc',
'video/capture/video_capture_device_unittest.cc',
- 'video/ffmpeg_video_decode_engine_unittest.cc',
'webm/cluster_builder.cc',
'webm/cluster_builder.h',
],
@@ -624,7 +623,6 @@
'filters/ffmpeg_glue_unittest.cc',
'filters/ffmpeg_h264_bitstream_converter_unittest.cc',
'filters/ffmpeg_video_decoder_unittest.cc',
- 'video/ffmpeg_video_decode_engine_unittest.cc',
],
}],
[ 'target_arch=="ia32" or target_arch=="x64"', {
diff --git a/media/video/ffmpeg_video_decode_engine.cc b/media/video/ffmpeg_video_decode_engine.cc
index ceaa802..a6bff26 100644
--- a/media/video/ffmpeg_video_decode_engine.cc
+++ b/media/video/ffmpeg_video_decode_engine.cc
@@ -5,12 +5,11 @@
#include "media/video/ffmpeg_video_decode_engine.h"
#include "base/command_line.h"
+#include "base/logging.h"
#include "base/string_number_conversions.h"
-#include "base/task.h"
#include "media/base/buffers.h"
-#include "media/base/limits.h"
#include "media/base/media_switches.h"
-#include "media/base/pipeline.h"
+#include "media/base/video_decoder_config.h"
#include "media/base/video_util.h"
#include "media/ffmpeg/ffmpeg_common.h"
@@ -18,26 +17,16 @@ namespace media {
FFmpegVideoDecodeEngine::FFmpegVideoDecodeEngine()
: codec_context_(NULL),
- event_handler_(NULL),
+ av_frame_(NULL),
frame_rate_numerator_(0),
- frame_rate_denominator_(0),
- pending_input_buffers_(0),
- pending_output_buffers_(0),
- output_eos_reached_(false),
- flush_pending_(false) {
+ frame_rate_denominator_(0) {
}
FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() {
- if (codec_context_) {
- av_free(codec_context_->extradata);
- avcodec_close(codec_context_);
- av_free(codec_context_);
- }
+ Uninitialize();
}
-void FFmpegVideoDecodeEngine::Initialize(
- VideoDecodeEngine::EventHandler* event_handler,
- const VideoDecoderConfig& config) {
+bool FFmpegVideoDecodeEngine::Initialize(const VideoDecoderConfig& config) {
frame_rate_numerator_ = config.frame_rate_numerator();
frame_rate_denominator_ = config.frame_rate_denominator();
@@ -80,72 +69,30 @@ void FFmpegVideoDecodeEngine::Initialize(
codec_context_->thread_count = decode_threads;
- // We don't allocate AVFrame on the stack since different versions of FFmpeg
- // may change the size of AVFrame, causing stack corruption. The solution is
- // to let FFmpeg allocate the structure via avcodec_alloc_frame().
- av_frame_.reset(avcodec_alloc_frame());
-
- // If we do not have enough buffers, we will report error too.
- frame_queue_available_.clear();
-
- // Convert the pixel format to video format and ensure we support it.
- VideoFrame::Format format =
- PixelFormatToVideoFormat(codec_context_->pix_fmt);
-
- bool success = false;
- if (format != VideoFrame::INVALID) {
- // Create output buffer pool when direct rendering is not used.
- for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) {
- scoped_refptr<VideoFrame> video_frame =
- VideoFrame::CreateFrame(format,
- config.visible_rect().width(),
- config.visible_rect().height(),
- kNoTimestamp,
- kNoTimestamp);
- frame_queue_available_.push_back(video_frame);
- }
-
- // Open the codec!
- success = codec && avcodec_open(codec_context_, codec) >= 0;
- }
+ av_frame_ = avcodec_alloc_frame();
- event_handler_ = event_handler;
- event_handler_->OnInitializeComplete(success);
+ // Open the codec!
+ return codec && avcodec_open(codec_context_, codec) >= 0;
}
-void FFmpegVideoDecodeEngine::ConsumeVideoSample(
- scoped_refptr<Buffer> buffer) {
- pending_input_buffers_--;
- if (flush_pending_) {
- TryToFinishPendingFlush();
- } else {
- // Otherwise try to decode this buffer.
- DecodeFrame(buffer);
+void FFmpegVideoDecodeEngine::Uninitialize() {
+ if (codec_context_) {
+ av_free(codec_context_->extradata);
+ avcodec_close(codec_context_);
+ av_free(codec_context_);
+ codec_context_ = NULL;
}
-}
-
-void FFmpegVideoDecodeEngine::ProduceVideoFrame(
- scoped_refptr<VideoFrame> frame) {
- // We should never receive NULL frame or EOS frame.
- DCHECK(frame.get() && !frame->IsEndOfStream());
-
- // Increment pending output buffer count.
- pending_output_buffers_++;
-
- // Return this frame to available pool after display.
- frame_queue_available_.push_back(frame);
-
- if (flush_pending_) {
- TryToFinishPendingFlush();
- } else if (!output_eos_reached_) {
- // If we already deliver EOS to renderer, we stop reading new input.
- ReadInput();
+ if (av_frame_) {
+ av_free(av_frame_);
+ av_frame_ = NULL;
}
+ frame_rate_numerator_ = 0;
+ frame_rate_denominator_ = 0;
}
-// Try to decode frame when both input and output are ready.
-void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
- scoped_refptr<VideoFrame> video_frame;
+bool FFmpegVideoDecodeEngine::Decode(const scoped_refptr<Buffer>& buffer,
+ scoped_refptr<VideoFrame>* video_frame) {
+ DCHECK(video_frame);
// Create a packet for input data.
// Due to FFmpeg API changes we no longer have const read-only pointers.
@@ -154,9 +101,6 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
packet.data = const_cast<uint8*>(buffer->GetData());
packet.size = buffer->GetDataSize();
- PipelineStatistics statistics;
- statistics.video_bytes_decoded = buffer->GetDataSize();
-
// Let FFmpeg handle presentation timestamp reordering.
codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds();
@@ -166,7 +110,7 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
int frame_decoded = 0;
int result = avcodec_decode_video2(codec_context_,
- av_frame_.get(),
+ av_frame_,
&frame_decoded,
&packet);
// Log the problem if we can't decode a video frame and exit early.
@@ -175,23 +119,17 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
<< buffer->GetTimestamp().InMicroseconds() << " us, duration: "
<< buffer->GetDuration().InMicroseconds() << " us, packet size: "
<< buffer->GetDataSize() << " bytes";
- event_handler_->OnError();
- return;
+ *video_frame = NULL;
+ return false;
}
- // If frame_decoded == 0, then no frame was produced.
- // In this case, if we already begin to flush codec with empty
- // input packet at the end of input stream, the first time we
- // encounter frame_decoded == 0 signal output frame had been
- // drained, we mark the flag. Otherwise we read from demuxer again.
+ // If no frame was produced then signal that more data is required to
+ // produce more frames. This can happen under two circumstances:
+ // 1) Decoder was recently initialized/flushed
+ // 2) End of stream was reached and all internal frames have been output
if (frame_decoded == 0) {
- if (buffer->IsEndOfStream()) { // We had started flushing.
- event_handler_->ConsumeVideoFrame(video_frame, statistics);
- output_eos_reached_ = true;
- } else {
- ReadInput();
- }
- return;
+ *video_frame = NULL;
+ return true;
}
// TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
@@ -200,8 +138,16 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
if (!av_frame_->data[VideoFrame::kYPlane] ||
!av_frame_->data[VideoFrame::kUPlane] ||
!av_frame_->data[VideoFrame::kVPlane]) {
- event_handler_->OnError();
- return;
+ LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
+ *video_frame = NULL;
+ return false;
+ }
+
+ // We've got a frame! Make sure we have a place to store it.
+ *video_frame = AllocateVideoFrame();
+ if (!(*video_frame)) {
+ LOG(ERROR) << "Failed to allocate video frame";
+ return false;
}
// Determine timestamp and calculate the duration based on the repeat picture
@@ -217,83 +163,38 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
doubled_time_base.num = frame_rate_denominator_;
doubled_time_base.den = frame_rate_numerator_ * 2;
- base::TimeDelta timestamp =
- base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque);
- base::TimeDelta duration =
- ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict);
-
- // Available frame is guaranteed, because we issue as much reads as
- // available frame, except the case of |frame_decoded| == 0, which
- // implies decoder order delay, and force us to read more inputs.
- DCHECK(frame_queue_available_.size());
- video_frame = frame_queue_available_.front();
- frame_queue_available_.pop_front();
+ (*video_frame)->SetTimestamp(
+ base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
+ (*video_frame)->SetDuration(
+ ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict));
// Copy the frame data since FFmpeg reuses internal buffers for AVFrame
// output, meaning the data is only valid until the next
// avcodec_decode_video() call.
- //
- // TODO(scherkus): use VideoFrame dimensions instead and re-allocate
- // VideoFrame if dimensions changes, but for now adjust size locally.
int y_rows = codec_context_->height;
int uv_rows = codec_context_->height;
if (codec_context_->pix_fmt == PIX_FMT_YUV420P) {
uv_rows /= 2;
}
- CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, video_frame);
- CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, video_frame);
- CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, video_frame);
+ CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, *video_frame);
+ CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, *video_frame);
+ CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, *video_frame);
- video_frame->SetTimestamp(timestamp);
- video_frame->SetDuration(duration);
-
- pending_output_buffers_--;
- event_handler_->ConsumeVideoFrame(video_frame, statistics);
-}
-
-void FFmpegVideoDecodeEngine::Uninitialize() {
- event_handler_->OnUninitializeComplete();
+ return true;
}
void FFmpegVideoDecodeEngine::Flush() {
avcodec_flush_buffers(codec_context_);
- flush_pending_ = true;
- TryToFinishPendingFlush();
}
-void FFmpegVideoDecodeEngine::TryToFinishPendingFlush() {
- DCHECK(flush_pending_);
+scoped_refptr<VideoFrame> FFmpegVideoDecodeEngine::AllocateVideoFrame() {
+ VideoFrame::Format format = PixelFormatToVideoFormat(codec_context_->pix_fmt);
+ size_t width = codec_context_->width;
+ size_t height = codec_context_->height;
- // We consider ourself flushed when there is no pending input buffers
- // and output buffers, which implies that all buffers had been returned
- // to its owner.
- if (!pending_input_buffers_ && !pending_output_buffers_) {
- // Try to finish flushing and notify pipeline.
- flush_pending_ = false;
- event_handler_->OnFlushComplete();
- }
-}
-
-void FFmpegVideoDecodeEngine::Seek() {
- // After a seek, output stream no longer considered as EOS.
- output_eos_reached_ = false;
-
- // The buffer provider is assumed to perform pre-roll operation.
- for (unsigned int i = 0; i < Limits::kMaxVideoFrames; ++i)
- ReadInput();
-
- event_handler_->OnSeekComplete();
-}
-
-void FFmpegVideoDecodeEngine::ReadInput() {
- DCHECK_EQ(output_eos_reached_, false);
- pending_input_buffers_++;
- event_handler_->ProduceVideoSample(NULL);
+ return VideoFrame::CreateFrame(format, width, height,
+ kNoTimestamp, kNoTimestamp);
}
} // namespace media
-
-// Disable refcounting for this object because this object only lives
-// on the video decoder thread and there's no need to refcount it.
-DISABLE_RUNNABLE_METHOD_REFCOUNT(media::FFmpegVideoDecodeEngine);
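With the engine now synchronous, Decode() has a three-way result: false for a hard error, true with a frame on success, and true with a NULL frame when more input is needed (right after initialization/flush, or once the stream has been drained). A minimal sketch of a caller driving that contract, assuming a hypothetical ReadNextBuffer() helper supplied by the caller (this is not code from the patch):

// Returns false on a decode error; otherwise returns true and leaves
// |*frame_out| NULL if end of stream was reached without another frame.
bool DecodeOneFrame(media::VideoDecodeEngine* engine,
                    scoped_refptr<media::VideoFrame>* frame_out) {
  for (;;) {
    // Hypothetical: fetch the next demuxed buffer, or an end-of-stream
    // buffer once the demuxer is exhausted.
    scoped_refptr<media::Buffer> buffer = ReadNextBuffer();

    scoped_refptr<media::VideoFrame> frame;
    if (!engine->Decode(buffer, &frame))
      return false;  // Hard decode error.

    if (frame) {
      *frame_out = frame;  // Got a decoded frame.
      return true;
    }

    // Decode() succeeded but produced nothing: the engine needs more input,
    // unless this was already the end-of-stream buffer.
    if (buffer->IsEndOfStream())
      return true;
  }
}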
diff --git a/media/video/ffmpeg_video_decode_engine.h b/media/video/ffmpeg_video_decode_engine.h
index 3ac7411..072507a 100644
--- a/media/video/ffmpeg_video_decode_engine.h
+++ b/media/video/ffmpeg_video_decode_engine.h
@@ -5,13 +5,9 @@
#ifndef MEDIA_VIDEO_FFMPEG_VIDEO_DECODE_ENGINE_H_
#define MEDIA_VIDEO_FFMPEG_VIDEO_DECODE_ENGINE_H_
-#include <deque>
-
-#include "base/memory/scoped_ptr.h"
-#include "media/ffmpeg/ffmpeg_common.h"
+#include "base/compiler_specific.h"
#include "media/video/video_decode_engine.h"
-// FFmpeg types.
struct AVCodecContext;
struct AVFrame;
@@ -22,47 +18,26 @@ class MEDIA_EXPORT FFmpegVideoDecodeEngine : public VideoDecodeEngine {
FFmpegVideoDecodeEngine();
virtual ~FFmpegVideoDecodeEngine();
- // Implementation of the VideoDecodeEngine Interface.
- virtual void Initialize(VideoDecodeEngine::EventHandler* event_handler,
- const VideoDecoderConfig& config);
- virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
- virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
- virtual void Uninitialize();
- virtual void Flush();
- virtual void Seek();
+ // VideoDecodeEngine implementation.
+ virtual bool Initialize(const VideoDecoderConfig& config) OVERRIDE;
+ virtual void Uninitialize() OVERRIDE;
+ virtual bool Decode(const scoped_refptr<Buffer>& buffer,
+ scoped_refptr<VideoFrame>* video_frame) OVERRIDE;
+ virtual void Flush() OVERRIDE;
private:
- void DecodeFrame(scoped_refptr<Buffer> buffer);
- void ReadInput();
- void TryToFinishPendingFlush();
+ // Allocates a video frame based on the current format and dimensions of
+ // |codec_context_|.
+ scoped_refptr<VideoFrame> AllocateVideoFrame();
+ // FFmpeg structures owned by this object.
AVCodecContext* codec_context_;
- scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> av_frame_;
- VideoDecodeEngine::EventHandler* event_handler_;
+ AVFrame* av_frame_;
// Frame rate of the video.
int frame_rate_numerator_;
int frame_rate_denominator_;
- // Indicate how many buffers are pending on input port of this filter:
- // Increment when engine receive one input packet from demuxer;
- // Decrement when engine send one input packet to demuxer;
- int pending_input_buffers_;
-
- // Indicate how many buffers are pending on output port of this filter:
- // Increment when engine receive one output frame from renderer;
- // Decrement when engine send one output frame to renderer;
- int pending_output_buffers_;
-
- // Whether end of stream had been reached at output side.
- bool output_eos_reached_;
-
- // Used when direct rendering is disabled to hold available output buffers.
- std::deque<scoped_refptr<VideoFrame> > frame_queue_available_;
-
- // Whether flush operation is pending.
- bool flush_pending_;
-
DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecodeEngine);
};
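Decode() derives each frame's duration from a doubled time base so that AVFrame::repeat_pict, which is expressed in half-frame units, can be added directly. A rough worked example: with a 30000/1001 frame rate and repeat_pict == 0, the duration is (2 + 0) * 1001 / (2 * 30000) seconds, roughly 33,366 microseconds per frame. The sketch below is illustrative only (it truncates to whole microseconds; the real ConvertFromTimeBase() helper may round differently):

#include <cstdint>

// Illustrative duration calculation matching the doubled time base used in
// Decode(): duration = (2 + repeat_pict) * den / (2 * num) seconds.
int64_t FrameDurationMicroseconds(int frame_rate_numerator,
                                  int frame_rate_denominator,
                                  int repeat_pict) {
  return static_cast<int64_t>(2 + repeat_pict) * frame_rate_denominator *
         1000000 / (2 * static_cast<int64_t>(frame_rate_numerator));
}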
diff --git a/media/video/ffmpeg_video_decode_engine_unittest.cc b/media/video/ffmpeg_video_decode_engine_unittest.cc
deleted file mode 100644
index f2cf348..0000000
--- a/media/video/ffmpeg_video_decode_engine_unittest.cc
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop.h"
-#include "media/base/data_buffer.h"
-#include "media/base/pipeline.h"
-#include "media/base/test_data_util.h"
-#include "media/filters/ffmpeg_glue.h"
-#include "media/video/ffmpeg_video_decode_engine.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::DoAll;
-using ::testing::Return;
-using ::testing::ReturnNull;
-using ::testing::SaveArg;
-using ::testing::SetArgumentPointee;
-using ::testing::StrictMock;
-
-namespace media {
-
-static const VideoFrame::Format kVideoFormat = VideoFrame::YV12;
-static const gfx::Size kCodedSize(320, 240);
-static const gfx::Rect kVisibleRect(320, 240);
-static const gfx::Size kNaturalSize(522, 288);
-static const AVRational kFrameRate = { 100, 1 };
-static const AVRational kAspectRatio = { 1, 1 };
-
-ACTION_P2(DemuxComplete, engine, buffer) {
- engine->ConsumeVideoSample(buffer);
-}
-
-class FFmpegVideoDecodeEngineTest
- : public testing::Test,
- public VideoDecodeEngine::EventHandler {
- public:
- FFmpegVideoDecodeEngineTest()
- : config_(kCodecVP8, kVideoFormat, kCodedSize, kVisibleRect,
- kFrameRate.num, kFrameRate.den,
- kAspectRatio.num, kAspectRatio.den,
- NULL, 0) {
- CHECK(FFmpegGlue::GetInstance());
-
- // Setup FFmpeg structures.
- frame_buffer_.reset(new uint8[kCodedSize.GetArea()]);
-
- test_engine_.reset(new FFmpegVideoDecodeEngine());
-
- ReadTestDataFile("vp8-I-frame-320x240", &i_frame_buffer_);
- ReadTestDataFile("vp8-corrupt-I-frame", &corrupt_i_frame_buffer_);
-
- end_of_stream_buffer_ = new DataBuffer(0);
- }
-
- ~FFmpegVideoDecodeEngineTest() {
- test_engine_.reset();
- }
-
- void Initialize() {
- EXPECT_CALL(*this, OnInitializeComplete(true));
- test_engine_->Initialize(this, config_);
- }
-
- // Decodes the single compressed frame in |buffer| and writes the
- // uncompressed output to |video_frame|. This method works with single
- // and multithreaded decoders. End of stream buffers are used to trigger
- // the frame to be returned in the multithreaded decoder case.
- void DecodeASingleFrame(const scoped_refptr<Buffer>& buffer,
- scoped_refptr<VideoFrame>* video_frame) {
- EXPECT_CALL(*this, ProduceVideoSample(_))
- .WillOnce(DemuxComplete(test_engine_.get(), buffer))
- .WillRepeatedly(DemuxComplete(test_engine_.get(),
- end_of_stream_buffer_));
-
- EXPECT_CALL(*this, ConsumeVideoFrame(_, _))
- .WillOnce(SaveArg<0>(video_frame));
- CallProduceVideoFrame();
- }
-
- // Decodes |i_frame_buffer_| and then decodes the data contained in
- // the file named |test_file_name|. This function expects both buffers
- // to decode to frames that are the same size.
- void DecodeIFrameThenTestFile(const std::string& test_file_name) {
- Initialize();
-
- scoped_refptr<VideoFrame> video_frame_a;
- scoped_refptr<VideoFrame> video_frame_b;
-
- scoped_refptr<Buffer> buffer;
- ReadTestDataFile(test_file_name, &buffer);
-
- EXPECT_CALL(*this, ProduceVideoSample(_))
- .WillOnce(DemuxComplete(test_engine_.get(), i_frame_buffer_))
- .WillOnce(DemuxComplete(test_engine_.get(), buffer))
- .WillRepeatedly(DemuxComplete(test_engine_.get(),
- end_of_stream_buffer_));
-
- EXPECT_CALL(*this, ConsumeVideoFrame(_, _))
- .WillOnce(SaveArg<0>(&video_frame_a))
- .WillOnce(SaveArg<0>(&video_frame_b));
- CallProduceVideoFrame();
- CallProduceVideoFrame();
-
- size_t expected_width = static_cast<size_t>(kVisibleRect.width());
- size_t expected_height = static_cast<size_t>(kVisibleRect.height());
-
- EXPECT_EQ(expected_width, video_frame_a->width());
- EXPECT_EQ(expected_height, video_frame_a->height());
- EXPECT_EQ(expected_width, video_frame_b->width());
- EXPECT_EQ(expected_height, video_frame_b->height());
- }
-
- // VideoDecodeEngine::EventHandler implementation.
- MOCK_METHOD2(ConsumeVideoFrame,
- void(scoped_refptr<VideoFrame>, const PipelineStatistics&));
- MOCK_METHOD1(ProduceVideoSample, void(scoped_refptr<Buffer>));
- MOCK_METHOD1(OnInitializeComplete, void(bool));
- MOCK_METHOD0(OnUninitializeComplete, void());
- MOCK_METHOD0(OnFlushComplete, void());
- MOCK_METHOD0(OnSeekComplete, void());
- MOCK_METHOD0(OnError, void());
-
- void CallProduceVideoFrame() {
- test_engine_->ProduceVideoFrame(VideoFrame::CreateFrame(
- VideoFrame::YV12, kVisibleRect.width(), kVisibleRect.height(),
- kNoTimestamp, kNoTimestamp));
- }
-
- protected:
- VideoDecoderConfig config_;
- scoped_ptr<FFmpegVideoDecodeEngine> test_engine_;
- scoped_array<uint8_t> frame_buffer_;
- scoped_refptr<Buffer> i_frame_buffer_;
- scoped_refptr<Buffer> corrupt_i_frame_buffer_;
- scoped_refptr<Buffer> end_of_stream_buffer_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecodeEngineTest);
-};
-
-TEST_F(FFmpegVideoDecodeEngineTest, Initialize_Normal) {
- Initialize();
-}
-
-TEST_F(FFmpegVideoDecodeEngineTest, Initialize_FindDecoderFails) {
- VideoDecoderConfig config(kUnknownVideoCodec, kVideoFormat,
- kCodedSize, kVisibleRect,
- kFrameRate.num, kFrameRate.den,
- kAspectRatio.num, kAspectRatio.den,
- NULL, 0);
-
- // Test avcodec_find_decoder() returning NULL.
- EXPECT_CALL(*this, OnInitializeComplete(false));
- test_engine_->Initialize(this, config);
-}
-
-TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) {
- // Specify Theora w/o extra data so that avcodec_open() fails.
- VideoDecoderConfig config(kCodecTheora, kVideoFormat,
- kCodedSize, kVisibleRect,
- kFrameRate.num, kFrameRate.den,
- kAspectRatio.num, kAspectRatio.den,
- NULL, 0);
- EXPECT_CALL(*this, OnInitializeComplete(false));
- test_engine_->Initialize(this, config);
-}
-
-TEST_F(FFmpegVideoDecodeEngineTest, Initialize_UnsupportedPixelFormat) {
- // Ensure decoder handles unsupport pixel formats without crashing.
- VideoDecoderConfig config(kCodecVP8, VideoFrame::INVALID,
- kCodedSize, kVisibleRect,
- kFrameRate.num, kFrameRate.den,
- kAspectRatio.num, kAspectRatio.den,
- NULL, 0);
- EXPECT_CALL(*this, OnInitializeComplete(false));
- test_engine_->Initialize(this, config);
-}
-
-TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_Normal) {
- Initialize();
-
- // Simulate decoding a single frame.
- scoped_refptr<VideoFrame> video_frame;
- DecodeASingleFrame(i_frame_buffer_, &video_frame);
-
- // |video_frame| timestamp is 0 because we set the timestamp based off
- // the buffer timestamp.
- ASSERT_TRUE(video_frame);
- EXPECT_EQ(0, video_frame->GetTimestamp().ToInternalValue());
- EXPECT_EQ(10000, video_frame->GetDuration().ToInternalValue());
-}
-
-
-// Verify current behavior for 0 byte frames. FFmpeg simply ignores
-// the 0 byte frames.
-TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_0ByteFrame) {
- Initialize();
-
- scoped_refptr<DataBuffer> zero_byte_buffer = new DataBuffer(1);
-
- scoped_refptr<VideoFrame> video_frame_a;
- scoped_refptr<VideoFrame> video_frame_b;
- scoped_refptr<VideoFrame> video_frame_c;
-
- EXPECT_CALL(*this, ProduceVideoSample(_))
- .WillOnce(DemuxComplete(test_engine_.get(), i_frame_buffer_))
- .WillOnce(DemuxComplete(test_engine_.get(), zero_byte_buffer))
- .WillOnce(DemuxComplete(test_engine_.get(), i_frame_buffer_))
- .WillRepeatedly(DemuxComplete(test_engine_.get(),
- end_of_stream_buffer_));
-
- EXPECT_CALL(*this, ConsumeVideoFrame(_, _))
- .WillOnce(SaveArg<0>(&video_frame_a))
- .WillOnce(SaveArg<0>(&video_frame_b))
- .WillOnce(SaveArg<0>(&video_frame_c));
- CallProduceVideoFrame();
- CallProduceVideoFrame();
- CallProduceVideoFrame();
-
- EXPECT_TRUE(video_frame_a);
- EXPECT_TRUE(video_frame_b);
- EXPECT_FALSE(video_frame_c);
-}
-
-
-TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_DecodeError) {
- Initialize();
-
- EXPECT_CALL(*this, ProduceVideoSample(_))
- .WillOnce(DemuxComplete(test_engine_.get(), corrupt_i_frame_buffer_))
- .WillRepeatedly(DemuxComplete(test_engine_.get(), i_frame_buffer_));
- EXPECT_CALL(*this, OnError());
-
- CallProduceVideoFrame();
-}
-
-// Multi-threaded decoders have different behavior than single-threaded
-// decoders at the end of the stream. Multithreaded decoders hide errors
-// that happen on the last |codec_context_->thread_count| frames to avoid
-// prematurely signalling EOS. This test just exposes that behavior so we can
-// detect if it changes.
-TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_DecodeErrorAtEndOfStream) {
- Initialize();
-
- EXPECT_CALL(*this, ProduceVideoSample(_))
- .WillOnce(DemuxComplete(test_engine_.get(), corrupt_i_frame_buffer_))
- .WillRepeatedly(DemuxComplete(test_engine_.get(), end_of_stream_buffer_));
-
- scoped_refptr<VideoFrame> video_frame;
- EXPECT_CALL(*this, ConsumeVideoFrame(_, _))
- .WillOnce(SaveArg<0>(&video_frame));
- CallProduceVideoFrame();
-
- EXPECT_FALSE(video_frame);
-}
-
-// Decode |i_frame_buffer_| and then a frame with a larger width and verify
-// the output size didn't change.
-// TODO(acolwell): Fix InvalidRead detected by Valgrind
-//TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_LargerWidth) {
-// DecodeIFrameThenTestFile("vp8-I-frame-640x240");
-//}
-
-// Decode |i_frame_buffer_| and then a frame with a smaller width and verify
-// the output size didn't change.
-TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_SmallerWidth) {
- DecodeIFrameThenTestFile("vp8-I-frame-160x240");
-}
-
-// Decode |i_frame_buffer_| and then a frame with a larger height and verify
-// the output size didn't change.
-// TODO(acolwell): Fix InvalidRead detected by Valgrind
-//TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_LargerHeight) {
-// DecodeIFrameThenTestFile("vp8-I-frame-320x480");
-//}
-
-// Decode |i_frame_buffer_| and then a frame with a smaller height and verify
-// the output size didn't change.
-TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_SmallerHeight) {
- DecodeIFrameThenTestFile("vp8-I-frame-320x120");
-}
-
-} // namespace media
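The engine-level tests above go away because the synchronous interface no longer needs EventHandler mocks or callback choreography; equivalent coverage moves to ffmpeg_video_decoder_unittest.cc. As a minimal sketch (a hypothetical test, not the actual replacement, and assuming the ReadTestDataFile() helper from media/base/test_data_util.h), the new interface can be exercised with direct calls:

TEST(FFmpegVideoDecodeEngineSketch, DecodeSingleFrame) {
  media::VideoDecoderConfig config(
      media::kCodecVP8, media::VideoFrame::YV12,
      gfx::Size(320, 240), gfx::Rect(320, 240),
      100, 1,    // Frame rate.
      1, 1,      // Pixel aspect ratio.
      NULL, 0);  // No extra data.

  media::FFmpegVideoDecodeEngine engine;
  ASSERT_TRUE(engine.Initialize(config));

  scoped_refptr<media::Buffer> i_frame;
  ReadTestDataFile("vp8-I-frame-320x240", &i_frame);

  scoped_refptr<media::VideoFrame> frame;
  EXPECT_TRUE(engine.Decode(i_frame, &frame));
  // A multithreaded codec may buffer internally, so a NULL |frame| here is
  // not necessarily a failure; feeding an end-of-stream buffer would drain it.
}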
diff --git a/media/video/video_decode_engine.h b/media/video/video_decode_engine.h
index 2d52b24..4599331 100644
--- a/media/video/video_decode_engine.h
+++ b/media/video/video_decode_engine.h
@@ -5,101 +5,39 @@
#ifndef MEDIA_VIDEO_VIDEO_DECODE_ENGINE_H_
#define MEDIA_VIDEO_VIDEO_DECODE_ENGINE_H_
-#include "base/callback.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ref_counted.h"
#include "media/base/media_export.h"
-#include "media/base/video_decoder_config.h"
-#include "media/base/video_frame.h"
namespace media {
class Buffer;
-struct PipelineStatistics;
+class VideoDecoderConfig;
+class VideoFrame;
class MEDIA_EXPORT VideoDecodeEngine {
public:
- struct MEDIA_EXPORT EventHandler {
- public:
- virtual ~EventHandler() {}
- virtual void OnInitializeComplete(bool success) = 0;
- virtual void OnUninitializeComplete() = 0;
- virtual void OnFlushComplete() = 0;
- virtual void OnSeekComplete() = 0;
- virtual void OnError() = 0;
-
- // TODO(hclam): The following two methods shouldn't belong to this class
- // because they are not video decode events but used to send decoded
- // video frames and request video packets.
- //
- // Signal the user of VideoDecodeEngine to provide a video sample.
- //
- // In the normal running state, this method is called by the video decode
- // engine to request video samples used for decoding.
- //
- // In the case when the video decode engine is flushing, this method is
- // called to return video samples acquired by the video decode engine.
- //
- // |buffer| can be NULL in which case this method call is purely for
- // requesting new video samples. If |buffer| is non-NULL, the buffer is
- // returned to the owner at the same time as a request for video sample
- // is made.
- virtual void ProduceVideoSample(scoped_refptr<Buffer> buffer) = 0;
-
- // Signal the user of VideoDecodeEngine that a video frame is ready to
- // be consumed or a video frame is returned to the owner.
- //
- // In the normal running state, this method is called to signal that
- // |frame| contains a decoded video frame and is ready to be used.
- //
- // In the case of flushing and video frame is provided externally, this
- // method is called to return the video frame object to the owner.
- // The content of the video frame may be invalid.
- virtual void ConsumeVideoFrame(scoped_refptr<VideoFrame> frame,
- const PipelineStatistics& statistics) = 0;
- };
-
virtual ~VideoDecodeEngine() {}
- // Initialize the engine with specified configuration.
- //
- // Engine should call EventHandler::OnInitializeDone() whether the
- // initialization operation finished successfully or not.
- virtual void Initialize(EventHandler* event_handler,
- const VideoDecoderConfig& config) = 0;
+ // Initialize the engine with specified configuration, returning true if
+ // successful.
+ virtual bool Initialize(const VideoDecoderConfig& config) = 0;
- // Uninitialize the engine. Engine should destroy all resources and call
- // EventHandler::OnUninitializeComplete().
+ // Uninitialize the engine, freeing all resources. Calls to Flush() or
+ // Decode() will have no effect afterwards.
virtual void Uninitialize() = 0;
- // Flush the engine. Engine should return all the buffers to owner ( which
- // could be itself. ) then call EventHandler::OnFlushDone().
- virtual void Flush() = 0;
-
- // This method is used as a signal for the decode engine to preroll and
- // issue read requests after Flush() is made.
- virtual void Seek() = 0;
-
- // Provide a video sample to be used by the video decode engine.
+ // Decode the encoded video data and store the result (if any) into
+ // |video_frame|. Note that a frame may not always be produced if the
+ // decode engine has insufficient encoded data. In such circumstances,
+ // additional calls to Decode() may be required.
//
- // This method is called in response to ProvideVideoSample() called to the
- // user.
- virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer) = 0;
+ // Returns true if decoding was successful (including zero-length input and
+ // end of stream), false if a decoding error occurred.
+ virtual bool Decode(const scoped_refptr<Buffer>& buffer,
+ scoped_refptr<VideoFrame>* video_frame) = 0;
- // Signal the video decode engine to produce a video frame or return the
- // video frame object to the video decode engine.
- //
- // In the normal running state, this method is called by the user of the
- // video decode engine to request a decoded video frame. If |frame| is
- // NULL the video decode engine should allocate a video frame object.
- // Otherwise video decode engine should try to use the video frame object
- // provided as output.
- //
- // In flushing state and video frames are allocated internally this method
- // is called by the user to return the video frame object.
- //
- // In response to this method call, ConsumeVideoFrame() is called with a
- // video frame object containing decoded video content.
- virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame) = 0;
+ // Discard all pending data that has yet to be returned via Decode().
+ virtual void Flush() = 0;
};
} // namespace media
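Since every implementation now boils down to four synchronous calls, a trivial fake is enough to exercise callers of the interface. A minimal sketch, using only types already referenced in this patch (FakeVideoDecodeEngine itself is hypothetical, not part of the change):

class FakeVideoDecodeEngine : public media::VideoDecodeEngine {
 public:
  virtual bool Initialize(const media::VideoDecoderConfig& config) OVERRIDE {
    width_ = config.visible_rect().width();
    height_ = config.visible_rect().height();
    return true;
  }

  virtual void Uninitialize() OVERRIDE {}

  virtual bool Decode(const scoped_refptr<media::Buffer>& buffer,
                      scoped_refptr<media::VideoFrame>* video_frame) OVERRIDE {
    if (buffer->IsEndOfStream()) {
      *video_frame = NULL;  // Success, but nothing left to output.
      return true;
    }
    // "Decode" by handing back a blank frame stamped with the input's timing.
    *video_frame = media::VideoFrame::CreateFrame(
        media::VideoFrame::YV12, width_, height_,
        buffer->GetTimestamp(), buffer->GetDuration());
    return true;
  }

  virtual void Flush() OVERRIDE {}

 private:
  int width_;
  int height_;
};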