author     jiesun@google.com <jiesun@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2010-08-11 23:24:58 +0000
committer  jiesun@google.com <jiesun@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2010-08-11 23:24:58 +0000
commit     3d54fc6b8271fd58e4f239c169650a90225e4688 (patch)
tree       dcf0c96315ca079edd94f41f939af9314296f2b9 /media
parent     d461264aa483e2ace1e0225ebb442dbf4c753ada (diff)
Preparation for recycling buffers, patch 2.
1. Add ProvidesBuffer() to the Filter interface; not used yet (a simplified sketch of the intended buffer flow follows the diffstat below).
2. Add a Flush stage to the pipeline; not used yet. The renderer's pause work is moved into the renderer's Flush() (state progression sketched below).
3. Merge decoder_base into ffmpeg_video_decoder, because decoder_base is shared by the audio decoder.
Committed: http://src.chromium.org/viewvc/chrome?view=rev&revision=55603
Review URL: http://codereview.chromium.org/3030013
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55807 0039d316-1c4b-4281-b951-d872f2087c98
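The pipeline change in item 2 amounts to a new transient state between Pause and Seek. Below is a minimal sketch of that progression, condensed from the PipelineImpl::FindNextState() and FilterStateTransitionTask() hunks in the diff; the enum values and transition order come from the patch, while the driver in main() is purely illustrative.

```cpp
// Sketch (not the real Chromium code): the transient-state walk that
// PipelineImpl::FindNextState() performs after this patch. A seek now takes
// every filter through Pause -> Flush -> Seek -> Play instead of overloading
// Pause() with flushing work.
#include <iostream>

enum State { kPausing, kFlushing, kSeeking, kStarting, kStarted };

State FindNextState(State current) {
  if (current == kPausing)
    return kFlushing;   // New intermediate state added by this patch.
  if (current == kFlushing)
    return kSeeking;
  if (current == kSeeking)
    return kStarting;
  if (current == kStarting)
    return kStarted;
  return current;
}

int main() {
  const char* names[] = {"kPausing", "kFlushing", "kSeeking", "kStarting",
                         "kStarted"};
  State s = kPausing;
  std::cout << names[s];
  while (s != kStarted) {
    s = FindNextState(s);
    std::cout << " -> " << names[s];
  }
  std::cout << "\n";  // kPausing -> kFlushing -> kSeeking -> kStarting -> kStarted
}
```

In the real code each transition is driven by the completion callback of the previous per-filter call: FilterStateTransitionTask() issues Pause(), then the new Flush(), then Seek() on each filter in turn.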
Diffstat (limited to 'media')
media/base/filters.h                                  |  25
media/base/mock_filters.h                             |   2
media/base/pipeline_impl.cc                           |  19
media/base/pipeline_impl.h                            |   1
media/filters/ffmpeg_video_decode_engine.cc           |  18
media/filters/ffmpeg_video_decode_engine.h            |   7
media/filters/ffmpeg_video_decode_engine_unittest.cc  |  53
media/filters/ffmpeg_video_decoder.cc                 | 236
media/filters/ffmpeg_video_decoder.h                  |  60
media/filters/ffmpeg_video_decoder_unittest.cc        | 325
media/filters/omx_video_decode_engine.cc              |  15
media/filters/omx_video_decode_engine.h               |   3
media/filters/omx_video_decoder.cc                    |  17
media/filters/omx_video_decoder.h                     |   3
media/filters/video_decode_engine.h                   |   4
media/filters/video_renderer_base.cc                  |  53
media/filters/video_renderer_base.h                   |   4
17 files changed, 547 insertions, 298 deletions
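Before the per-file diff, here is a heavily simplified, hypothetical sketch of the pull-based buffer flow this patch prepares: the renderer hands an output frame to the decoder (FillThisBuffer), the decoder forwards it to the decode engine, and the engine asks back up the chain for compressed input (EmptyThisBuffer) until it can return a decoded frame. The class and method names mirror the diff; everything else (the callback wiring and the in-line demuxer stand-in) is invented for illustration and is not Chromium's API.

```cpp
// Hypothetical, heavily simplified model of the frame flow this patch sets
// up (real classes: VideoRendererBase, FFmpegVideoDecoder,
// FFmpegVideoDecodeEngine). The renderer pulls decoded frames; each pull
// propagates down to the engine, which pulls compressed input back through
// the decoder. Sketch only -- not Chromium's actual API.
#include <functional>
#include <iostream>
#include <string>

struct Buffer { std::string data; };
struct VideoFrame { std::string pixels; };

class Engine {
 public:
  std::function<void(const Buffer&)> need_input;       // empty-buffer callback
  std::function<void(const VideoFrame&)> frame_ready;  // fill-buffer callback

  // Output-side pull: remember the recycled frame, then ask for input.
  void FillThisBuffer(const VideoFrame& frame) {
    pending_ = frame;
    need_input(Buffer{});
  }
  // Input-side push: "decode" the compressed buffer into the pending frame.
  void EmptyThisBuffer(const Buffer& in) {
    pending_.pixels = "decoded(" + in.data + ")";
    frame_ready(pending_);
  }

 private:
  VideoFrame pending_;
};

class Decoder {
 public:
  explicit Decoder(Engine* engine) : engine_(engine) {
    engine_->need_input = [this](const Buffer&) { OnEngineEmptyBufferDone(); };
    engine_->frame_ready = [this](const VideoFrame& f) {
      OnEngineFillBufferDone(f);
    };
  }
  std::function<void(const VideoFrame&)> fill_buffer_done;  // to the renderer

  void FillThisBuffer(const VideoFrame& frame) {
    engine_->FillThisBuffer(frame);
  }

 private:
  void OnEngineEmptyBufferDone() {
    // Stand-in for the asynchronous demuxer_stream_->Read() in the patch.
    engine_->EmptyThisBuffer(Buffer{"packet"});
  }
  void OnEngineFillBufferDone(const VideoFrame& frame) {
    fill_buffer_done(frame);
  }

  Engine* engine_;
};

int main() {
  Engine engine;
  Decoder decoder(&engine);
  decoder.fill_buffer_done = [](const VideoFrame& f) {
    std::cout << "renderer received " << f.pixels << "\n";
  };
  decoder.FillThisBuffer(VideoFrame{});  // Renderer recycles an output frame.
  return 0;
}
```

In the actual patch the decoder's demuxer read is asynchronous and tracked by pending_reads_ and pending_requests_, which is why Flush() is issued in reverse filter order (renderer before decoder): by the time the decoder flushes, both counters are expected to be zero.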
diff --git a/media/base/filters.h b/media/base/filters.h index 8879233..e9d2c27 100644 --- a/media/base/filters.h +++ b/media/base/filters.h @@ -92,7 +92,7 @@ class MediaFilter : public base::RefCountedThreadSafe<MediaFilter> { // The pipeline has resumed playback. Filters can continue requesting reads. // Filters may implement this method if they need to respond to this call. - // TODO(boliu): Check that callback is not NULL in sublcasses. + // TODO(boliu): Check that callback is not NULL in subclasses. virtual void Play(FilterCallback* callback) { DCHECK(callback); if (callback) { @@ -101,10 +101,9 @@ class MediaFilter : public base::RefCountedThreadSafe<MediaFilter> { } } - // The pipeline has paused playback. Filters should fulfill any existing read - // requests and then idle. Filters may implement this method if they need to - // respond to this call. - // TODO(boliu): Check that callback is not NULL in sublcasses. + // The pipeline has paused playback. Filters should stop buffer exchange. + // Filters may implement this method if they need to respond to this call. + // TODO(boliu): Check that callback is not NULL in subclasses. virtual void Pause(FilterCallback* callback) { DCHECK(callback); if (callback) { @@ -113,9 +112,20 @@ class MediaFilter : public base::RefCountedThreadSafe<MediaFilter> { } } + // The pipeline has been flushed. Filters should return buffer to owners. + // Filters may implement this method if they need to respond to this call. + // TODO(boliu): Check that callback is not NULL in subclasses. + virtual void Flush(FilterCallback* callback) { + DCHECK(callback); + if (callback) { + callback->Run(); + delete callback; + } + } + // The pipeline is being stopped either as a result of an error or because // the client called Stop(). - // TODO(boliu): Check that callback is not NULL in sublcasses. + // TODO(boliu): Check that callback is not NULL in subclasses. virtual void Stop(FilterCallback* callback) { DCHECK(callback); if (callback) { @@ -293,6 +303,9 @@ class VideoDecoder : public MediaFilter { // We could also pass empty pointer here to let decoder provide buffers pool. virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame) = 0; + // Indicate whether decoder provides its own output buffers + virtual bool ProvidesBuffer() = 0; + private: scoped_ptr<FillBufferDoneCallback> fill_buffer_done_callback_; }; diff --git a/media/base/mock_filters.h b/media/base/mock_filters.h index ac9aab6..7f1bdaf 100644 --- a/media/base/mock_filters.h +++ b/media/base/mock_filters.h @@ -172,6 +172,7 @@ class MockVideoDecoder : public VideoDecoder { FilterCallback* callback)); MOCK_METHOD0(media_format, const MediaFormat&()); MOCK_METHOD1(FillThisBuffer, void(scoped_refptr<VideoFrame>)); + MOCK_METHOD0(ProvidesBuffer, bool()); protected: virtual ~MockVideoDecoder() {} @@ -222,6 +223,7 @@ class MockVideoRenderer : public VideoRenderer { MOCK_METHOD2(Initialize, void(VideoDecoder* decoder, FilterCallback* callback)); MOCK_METHOD0(HasEnded, bool()); + MOCK_METHOD1(FillThisBufferDone, void(scoped_refptr<VideoFrame> frame)); protected: virtual ~MockVideoRenderer() {} diff --git a/media/base/pipeline_impl.cc b/media/base/pipeline_impl.cc index e15492e..d8b3192 100644 --- a/media/base/pipeline_impl.cc +++ b/media/base/pipeline_impl.cc @@ -57,19 +57,6 @@ const char* GetThreadName() { } } -// Helper function used with NewRunnableMethod to implement a (very) crude -// blocking counter. -// -// TODO(scherkus): remove this as soon as Stop() is made asynchronous. 
-void DecrementCounter(Lock* lock, ConditionVariable* cond_var, int* count) { - AutoLock auto_lock(*lock); - --(*count); - CHECK(*count >= 0); - if (*count == 0) { - cond_var->Signal(); - } -} - } // namespace PipelineImpl::PipelineImpl(MessageLoop* message_loop) @@ -149,6 +136,7 @@ bool PipelineImpl::IsInitialized() const { AutoLock auto_lock(lock_); switch (state_) { case kPausing: + case kFlushing: case kSeeking: case kStarting: case kStarted: @@ -382,6 +370,7 @@ void PipelineImpl::FinishInitialization() { // static bool PipelineImpl::TransientState(State state) { return state == kPausing || + state == kFlushing || state == kSeeking || state == kStarting || state == kStopping; @@ -391,6 +380,8 @@ bool PipelineImpl::TransientState(State state) { PipelineImpl::State PipelineImpl::FindNextState(State current) { // TODO(scherkus): refactor InitializeTask() to make use of this function. if (current == kPausing) + return kFlushing; + if (current == kFlushing) return kSeeking; if (current == kSeeking) return kStarting; @@ -842,6 +833,8 @@ void PipelineImpl::FilterStateTransitionTask() { MediaFilter* filter = filters_[filters_.size() - remaining_transitions_]; if (state_ == kPausing) { filter->Pause(NewCallback(this, &PipelineImpl::OnFilterStateTransition)); + } else if (state_ == kFlushing) { + filter->Flush(NewCallback(this, &PipelineImpl::OnFilterStateTransition)); } else if (state_ == kSeeking) { filter->Seek(seek_timestamp_, NewCallback(this, &PipelineImpl::OnFilterStateTransition)); diff --git a/media/base/pipeline_impl.h b/media/base/pipeline_impl.h index a590b47..2486f0a 100644 --- a/media/base/pipeline_impl.h +++ b/media/base/pipeline_impl.h @@ -118,6 +118,7 @@ class PipelineImpl : public Pipeline, public FilterHost { kInitVideoRenderer, kPausing, kSeeking, + kFlushing, kStarting, kStarted, kEnded, diff --git a/media/filters/ffmpeg_video_decode_engine.cc b/media/filters/ffmpeg_video_decode_engine.cc index 6c5b16f..0454eb4 100644 --- a/media/filters/ffmpeg_video_decode_engine.cc +++ b/media/filters/ffmpeg_video_decode_engine.cc @@ -119,6 +119,15 @@ void FFmpegVideoDecodeEngine::EmptyThisBuffer( DecodeFrame(buffer); } +void FFmpegVideoDecodeEngine::FillThisBuffer(scoped_refptr<VideoFrame> frame) { + scoped_refptr<Buffer> buffer; + empty_this_buffer_callback_->Run(buffer); +} + +bool FFmpegVideoDecodeEngine::ProvidesBuffer() const { + return false; +} + // Try to decode frame when both input and output are ready. void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) { scoped_refptr<VideoFrame> video_frame; @@ -157,7 +166,10 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) { // If frame_decoded == 0, then no frame was produced. if (frame_decoded == 0) { - fill_this_buffer_callback_->Run(video_frame); + if (buffer->IsEndOfStream()) // We had started flushing. + fill_this_buffer_callback_->Run(video_frame); + else + empty_this_buffer_callback_->Run(buffer); return; } @@ -234,6 +246,10 @@ void FFmpegVideoDecodeEngine::Flush(Task* done_cb) { avcodec_flush_buffers(codec_context_); } +void FFmpegVideoDecodeEngine::Seek(Task* done_cb) { + AutoTaskRunner done_runner(done_cb); +} + VideoFrame::Format FFmpegVideoDecodeEngine::GetSurfaceFormat() const { // J (Motion JPEG) versions of YUV are full range 0..255. // Regular (MPEG) YUV is 16..240. 
diff --git a/media/filters/ffmpeg_video_decode_engine.h b/media/filters/ffmpeg_video_decode_engine.h index 3941c52..57d7fdf 100644 --- a/media/filters/ffmpeg_video_decode_engine.h +++ b/media/filters/ffmpeg_video_decode_engine.h @@ -16,9 +16,6 @@ struct AVStream; namespace media { -class InputBuffer; -class OmxCodec; - class FFmpegVideoDecodeEngine : public VideoDecodeEngine { public: FFmpegVideoDecodeEngine(); @@ -31,10 +28,12 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine { FillThisBufferCallback* fill_buffer_callback, Task* done_cb); virtual void EmptyThisBuffer(scoped_refptr<Buffer> buffer); - virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame) {} + virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame); + virtual bool ProvidesBuffer() const; virtual void Stop(Task* done_cb); virtual void Pause(Task* done_cb); virtual void Flush(Task* done_cb); + virtual void Seek(Task* done_cb); virtual VideoFrame::Format GetSurfaceFormat() const; virtual State state() const { return state_; } diff --git a/media/filters/ffmpeg_video_decode_engine_unittest.cc b/media/filters/ffmpeg_video_decode_engine_unittest.cc index d335b6e..deb1305 100644 --- a/media/filters/ffmpeg_video_decode_engine_unittest.cc +++ b/media/filters/ffmpeg_video_decode_engine_unittest.cc @@ -55,6 +55,13 @@ class FFmpegVideoDecodeEngineTest : public testing::Test { test_engine_.reset(new FFmpegVideoDecodeEngine()); test_engine_->SetCodecContextForTest(&codec_context_); + + VideoFrame::CreateFrame(VideoFrame::YV12, + kWidth, + kHeight, + StreamSample::kInvalidTimestamp, + StreamSample::kInvalidTimestamp, + &video_frame_); } ~FFmpegVideoDecodeEngineTest() { @@ -79,15 +86,17 @@ class FFmpegVideoDecodeEngineTest : public testing::Test { test_engine_->Initialize( MessageLoop::current(), &stream_, - NULL, - NewCallback(this, &FFmpegVideoDecodeEngineTest::OnDecodeComplete), + NewCallback(this, &FFmpegVideoDecodeEngineTest::OnEmptyBufferDone), + NewCallback(this, &FFmpegVideoDecodeEngineTest::OnFillBufferDone), done_cb.CreateTask()); EXPECT_EQ(VideoDecodeEngine::kNormal, test_engine_->state()); } public: - MOCK_METHOD1(OnDecodeComplete, + MOCK_METHOD1(OnFillBufferDone, void(scoped_refptr<VideoFrame> video_frame)); + MOCK_METHOD1(OnEmptyBufferDone, + void(scoped_refptr<Buffer> buffer)); scoped_refptr<VideoFrame> video_frame_; protected: @@ -181,6 +190,10 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) { EXPECT_EQ(VideoDecodeEngine::kError, test_engine_->state()); } +ACTION_P2(DemuxComplete, engine, buffer) { + engine->EmptyThisBuffer(buffer); +} + ACTION_P(DecodeComplete, decoder) { decoder->video_frame_ = arg0; } @@ -202,9 +215,11 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_Normal) { .WillOnce(DoAll(SetArgumentPointee<2>(1), // Simulate 1 byte frame. Return(0))); - EXPECT_CALL(*this, OnDecodeComplete(_)) - .WillOnce(DecodeComplete(this)); - test_engine_->EmptyThisBuffer(buffer_); + EXPECT_CALL(*this, OnEmptyBufferDone(_)) + .WillOnce(DemuxComplete(test_engine_.get(), buffer_)); + EXPECT_CALL(*this, OnFillBufferDone(_)) + .WillOnce(DecodeComplete(this)); + test_engine_->FillThisBuffer(video_frame_); // |video_frame_| timestamp is 0 because we set the timestamp based off // the buffer timestamp. @@ -217,16 +232,23 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_0ByteFrame) { Initialize(); // Expect a bunch of avcodec calls. 
- EXPECT_CALL(mock_ffmpeg_, AVInitPacket(_)); + EXPECT_CALL(mock_ffmpeg_, AVInitPacket(_)) + .Times(2); EXPECT_CALL(mock_ffmpeg_, AVCodecDecodeVideo2(&codec_context_, &yuv_frame_, _, _)) .WillOnce(DoAll(SetArgumentPointee<2>(0), // Simulate 0 byte frame. + Return(0))) + .WillOnce(DoAll(SetArgumentPointee<2>(1), // Simulate 1 byte frame. Return(0))); - EXPECT_CALL(*this, OnDecodeComplete(_)) - .WillOnce(DecodeComplete(this)); - test_engine_->EmptyThisBuffer(buffer_); - EXPECT_FALSE(video_frame_.get()); + EXPECT_CALL(*this, OnEmptyBufferDone(_)) + .WillOnce(DemuxComplete(test_engine_.get(), buffer_)) + .WillOnce(DemuxComplete(test_engine_.get(), buffer_)); + EXPECT_CALL(*this, OnFillBufferDone(_)) + .WillOnce(DecodeComplete(this)); + test_engine_->FillThisBuffer(video_frame_); + + EXPECT_TRUE(video_frame_.get()); } TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_DecodeError) { @@ -238,9 +260,12 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_DecodeError) { AVCodecDecodeVideo2(&codec_context_, &yuv_frame_, _, _)) .WillOnce(Return(-1)); - EXPECT_CALL(*this, OnDecodeComplete(_)) - .WillOnce(DecodeComplete(this)); - test_engine_->EmptyThisBuffer(buffer_); + EXPECT_CALL(*this, OnEmptyBufferDone(_)) + .WillOnce(DemuxComplete(test_engine_.get(), buffer_)); + EXPECT_CALL(*this, OnFillBufferDone(_)) + .WillOnce(DecodeComplete(this)); + test_engine_->FillThisBuffer(video_frame_); + EXPECT_FALSE(video_frame_.get()); } diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc index 4496505..2f94214 100644 --- a/media/filters/ffmpeg_video_decoder.cc +++ b/media/filters/ffmpeg_video_decoder.cc @@ -7,7 +7,9 @@ #include <deque> #include "base/task.h" +#include "media/base/callback.h" #include "media/base/filters.h" +#include "media/base/filter_host.h" #include "media/base/limits.h" #include "media/base/media_format.h" #include "media/base/video_frame.h" @@ -23,22 +25,35 @@ FFmpegVideoDecoder::FFmpegVideoDecoder(VideoDecodeEngine* engine) : width_(0), height_(0), time_base_(new AVRational()), - state_(kNormal), - decode_engine_(engine) { + state_(kUnInitialized), + decode_engine_(engine), + pending_reads_(0), + pending_requests_(0) { } FFmpegVideoDecoder::~FFmpegVideoDecoder() { } -void FFmpegVideoDecoder::DoInitialize(DemuxerStream* demuxer_stream, - bool* success, - Task* done_cb) { - AutoTaskRunner done_runner(done_cb); - *success = false; +void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream, + FilterCallback* callback) { + if (MessageLoop::current() != message_loop()) { + message_loop()->PostTask(FROM_HERE, + NewRunnableMethod(this, + &FFmpegVideoDecoder::Initialize, + demuxer_stream, + callback)); + return; + } + + DCHECK_EQ(MessageLoop::current(), message_loop()); + DCHECK(!demuxer_stream_); + + demuxer_stream_ = demuxer_stream; // Get the AVStream by querying for the provider interface. 
AVStreamProvider* av_stream_provider; if (!demuxer_stream->QueryInterface(&av_stream_provider)) { + FFmpegVideoDecoder::OnInitializeComplete(callback); return; } AVStream* av_stream = av_stream_provider->GetAVStream(); @@ -53,25 +68,27 @@ void FFmpegVideoDecoder::DoInitialize(DemuxerStream* demuxer_stream, if (width_ > Limits::kMaxDimension || height_ > Limits::kMaxDimension || (width_ * height_) > Limits::kMaxCanvas) { + FFmpegVideoDecoder::OnInitializeComplete(callback); return; } decode_engine_->Initialize( message_loop(), av_stream, - NewCallback(this, &FFmpegVideoDecoder::OnEmptyBufferDone), - NewCallback(this, &FFmpegVideoDecoder::OnDecodeComplete), + NewCallback(this, &FFmpegVideoDecoder::OnEngineEmptyBufferDone), + NewCallback(this, &FFmpegVideoDecoder::OnEngineFillBufferDone), NewRunnableMethod(this, &FFmpegVideoDecoder::OnInitializeComplete, - success, - done_runner.release())); + callback)); } -void FFmpegVideoDecoder::OnInitializeComplete(bool* success, Task* done_cb) { - AutoTaskRunner done_runner(done_cb); +void FFmpegVideoDecoder::OnInitializeComplete(FilterCallback* callback) { + CHECK_EQ(MessageLoop::current(), message_loop()); - *success = decode_engine_->state() == VideoDecodeEngine::kNormal; - if (*success) { + AutoCallbackRunner done_runner(callback); + + bool success = decode_engine_->state() == VideoDecodeEngine::kNormal; + if (success) { media_format_.SetAsString(MediaFormat::kMimeType, mime_type::kUncompressedVideo); media_format_.SetAsInteger(MediaFormat::kWidth, width_); @@ -82,32 +99,104 @@ void FFmpegVideoDecoder::OnInitializeComplete(bool* success, Task* done_cb) { media_format_.SetAsInteger( MediaFormat::kSurfaceFormat, static_cast<int>(decode_engine_->GetSurfaceFormat())); + state_ = kNormal; + } else { + host()->SetError(PIPELINE_ERROR_DECODE); + } +} + +void FFmpegVideoDecoder::Stop(FilterCallback* callback) { + if (MessageLoop::current() != message_loop()) { + message_loop()->PostTask(FROM_HERE, + NewRunnableMethod(this, + &FFmpegVideoDecoder::Stop, + callback)); + return; } + + DCHECK_EQ(MessageLoop::current(), message_loop()); + + decode_engine_->Stop( + NewRunnableMethod(this, &FFmpegVideoDecoder::OnStopComplete, callback)); } -void FFmpegVideoDecoder::DoStop(Task* done_cb) { - decode_engine_->Stop(done_cb); +void FFmpegVideoDecoder::OnStopComplete(FilterCallback* callback) { + DCHECK_EQ(MessageLoop::current(), message_loop()); + + AutoCallbackRunner done_runner(callback); + state_ = kStopped; } -void FFmpegVideoDecoder::DoSeek(base::TimeDelta time, Task* done_cb) { +void FFmpegVideoDecoder::Flush(FilterCallback* callback) { + if (MessageLoop::current() != message_loop()) { + message_loop()->PostTask(FROM_HERE, + NewRunnableMethod(this, + &FFmpegVideoDecoder::Flush, + callback)); + return; + } + + DCHECK_EQ(MessageLoop::current(), message_loop()); + // Everything in the presentation time queue is invalid, clear the queue. while (!pts_heap_.IsEmpty()) pts_heap_.Pop(); - // We're back where we started. It should be completely safe to flush here - // since DecoderBase uses |expecting_discontinuous_| to verify that the next - // time DoDecode() is called we will have a discontinuous buffer. - // - // TODO(ajwong): Should we put a guard here to prevent leaving kError. 
+ decode_engine_->Flush( + NewRunnableMethod(this, &FFmpegVideoDecoder::OnFlushComplete, callback)); +} + +void FFmpegVideoDecoder::OnFlushComplete(FilterCallback* callback) { + DCHECK_EQ(MessageLoop::current(), message_loop()); + + AutoCallbackRunner done_runner(callback); + + // Since we are sending Flush() in reverse order of filter. (i.e. flushing + // renderer before decoder). we could guaranteed the following invariant. + // TODO(jiesun): when we move to parallel Flush, we should remove this. + DCHECK_EQ(0u, pending_reads_) << "Pending reads should have completed"; + DCHECK_EQ(0u, pending_requests_) << "Pending requests should be empty"; + +} + +void FFmpegVideoDecoder::Seek(base::TimeDelta time, + FilterCallback* callback) { + if (MessageLoop::current() != message_loop()) { + message_loop()->PostTask(FROM_HERE, + NewRunnableMethod(this, + &FFmpegVideoDecoder::Seek, + time, + callback)); + return; + } + + DCHECK_EQ(MessageLoop::current(), message_loop()); + + decode_engine_->Seek( + NewRunnableMethod(this, &FFmpegVideoDecoder::OnSeekComplete, callback)); +} + +void FFmpegVideoDecoder::OnSeekComplete(FilterCallback* callback) { + DCHECK_EQ(MessageLoop::current(), message_loop()); + + AutoCallbackRunner done_runner(callback); state_ = kNormal; +} - decode_engine_->Flush(done_cb); +void FFmpegVideoDecoder::OnReadComplete(Buffer* buffer_in) { + scoped_refptr<Buffer> buffer = buffer_in; + message_loop()->PostTask( + FROM_HERE, + NewRunnableMethod(this, + &FFmpegVideoDecoder::OnReadCompleteTask, + buffer)); } -void FFmpegVideoDecoder::DoDecode(Buffer* buffer) { - // TODO(ajwong): This DoDecode() and OnDecodeComplete() set of functions is - // too complicated to easily unittest. The test becomes fragile. Try to - // find a way to reorganize into smaller units for testing. +void FFmpegVideoDecoder::OnReadCompleteTask(scoped_refptr<Buffer> buffer) { + DCHECK_EQ(MessageLoop::current(), message_loop()); + DCHECK_GT(pending_reads_, 0u); + + --pending_reads_; // During decode, because reads are issued asynchronously, it is possible to // receive multiple end of stream buffers since each read is acked. When the @@ -135,9 +224,14 @@ void FFmpegVideoDecoder::DoDecode(Buffer* buffer) { // Any time buffer->IsDiscontinuous() is true. // // If the decoding is finished, we just always return empty frames. - if (state_ == kDecodeFinished) { - EnqueueEmptyFrame(); - OnEmptyBufferDone(NULL); + if (state_ == kDecodeFinished || state_ == kStopped) { + DCHECK(buffer->IsEndOfStream()); + + --pending_requests_; + // Signal VideoRenderer the end of the stream event. + scoped_refptr<VideoFrame> video_frame; + VideoFrame::CreateEmptyFrame(&video_frame); + fill_buffer_done_callback()->Run(video_frame); return; } @@ -152,7 +246,7 @@ void FFmpegVideoDecoder::DoDecode(Buffer* buffer) { // // TODO(ajwong): This push logic, along with the pop logic below needs to // be reevaluated to correctly handle decode errors. 
- if (state_ == kNormal && + if (state_ == kNormal && !buffer->IsEndOfStream() && buffer->GetTimestamp() != StreamSample::kInvalidTimestamp) { pts_heap_.Push(buffer->GetTimestamp()); } @@ -161,8 +255,36 @@ void FFmpegVideoDecoder::DoDecode(Buffer* buffer) { decode_engine_->EmptyThisBuffer(buffer); } -void FFmpegVideoDecoder::OnDecodeComplete( +void FFmpegVideoDecoder::FillThisBuffer( scoped_refptr<VideoFrame> video_frame) { + if (MessageLoop::current() != message_loop()) { + message_loop()->PostTask( + FROM_HERE, + NewRunnableMethod(this, + &FFmpegVideoDecoder::FillThisBuffer, + video_frame)); + return; + } + + DCHECK_EQ(MessageLoop::current(), message_loop()); + + // Synchronized flushing before stop should prevent this. + if (state_ == kStopped) + return; // Discard the video frame. + + // Notify decode engine the available of new frame. + ++pending_requests_; + decode_engine_->FillThisBuffer(video_frame); +} + +void FFmpegVideoDecoder::OnEngineFillBufferDone( + scoped_refptr<VideoFrame> video_frame) { + DCHECK_EQ(MessageLoop::current(), message_loop()); + + // TODO(jiesun): Flush before stop will prevent this from happening. + if (state_ == kStopped) + return; // Discard the video frame. + if (video_frame.get()) { // If we actually got data back, enqueue a frame. last_pts_ = FindPtsAndDuration(*time_base_, &pts_heap_, last_pts_, @@ -170,39 +292,35 @@ void FFmpegVideoDecoder::OnDecodeComplete( video_frame->SetTimestamp(last_pts_.timestamp); video_frame->SetDuration(last_pts_.duration); - EnqueueVideoFrame(video_frame); + + // Deliver this frame to VideoRenderer. + --pending_requests_; + fill_buffer_done_callback()->Run(video_frame); } else { // When in kFlushCodec, any errored decode, or a 0-lengthed frame, // is taken as a signal to stop decoding. if (state_ == kFlushCodec) { state_ = kDecodeFinished; - EnqueueEmptyFrame(); + + --pending_requests_; + // Signal VideoRenderer the end of the stream event. + scoped_refptr<VideoFrame> video_frame; + VideoFrame::CreateEmptyFrame(&video_frame); + fill_buffer_done_callback()->Run(video_frame); } } - - OnEmptyBufferDone(NULL); } -void FFmpegVideoDecoder::OnEmptyBufferDone(scoped_refptr<Buffer> buffer) { - // Currently we just ignore the returned buffer. - DecoderBase<VideoDecoder, VideoFrame>::OnDecodeComplete(); -} +void FFmpegVideoDecoder::OnEngineEmptyBufferDone( + scoped_refptr<Buffer> buffer) { + DCHECK_EQ(MessageLoop::current(), message_loop()); + DCHECK_LE(pending_reads_, pending_requests_); -void FFmpegVideoDecoder::FillThisBuffer(scoped_refptr<VideoFrame> frame) { - DecoderBase<VideoDecoder, VideoFrame>::FillThisBuffer(frame); - // Notify decode engine the available of new frame. 
- decode_engine_->FillThisBuffer(frame); -} - -void FFmpegVideoDecoder::EnqueueVideoFrame( - const scoped_refptr<VideoFrame>& video_frame) { - EnqueueResult(video_frame); -} - -void FFmpegVideoDecoder::EnqueueEmptyFrame() { - scoped_refptr<VideoFrame> video_frame; - VideoFrame::CreateEmptyFrame(&video_frame); - EnqueueResult(video_frame); + if (state_ != kDecodeFinished) { + demuxer_stream_->Read( + NewCallback(this, &FFmpegVideoDecoder::OnReadComplete)); + ++pending_reads_; + } } FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoder::FindPtsAndDuration( @@ -254,9 +372,9 @@ FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoder::FindPtsAndDuration( return pts; } -void FFmpegVideoDecoder::SignalPipelineError() { - host()->SetError(PIPELINE_ERROR_DECODE); - state_ = kDecodeFinished; +bool FFmpegVideoDecoder::ProvidesBuffer() { + if (!decode_engine_.get()) return false; + return decode_engine_->ProvidesBuffer(); } void FFmpegVideoDecoder::SetVideoDecodeEngineForTest( diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h index 65c4b75..92b6e78 100644 --- a/media/filters/ffmpeg_video_decoder.h +++ b/media/filters/ffmpeg_video_decoder.h @@ -7,6 +7,8 @@ #include "base/gtest_prod_util.h" #include "base/time.h" +#include "media/base/factory.h" +#include "media/base/filters.h" #include "media/base/pts_heap.h" #include "media/base/video_frame.h" #include "media/filters/decoder_base.h" @@ -18,7 +20,7 @@ namespace media { class VideoDecodeEngine; -class FFmpegVideoDecoder : public DecoderBase<VideoDecoder, VideoFrame> { +class FFmpegVideoDecoder : public VideoDecoder { public: explicit FFmpegVideoDecoder(VideoDecodeEngine* engine); virtual ~FFmpegVideoDecoder(); @@ -26,16 +28,17 @@ class FFmpegVideoDecoder : public DecoderBase<VideoDecoder, VideoFrame> { static FilterFactory* CreateFactory(); static bool IsMediaFormatSupported(const MediaFormat& media_format); - protected: - virtual void DoInitialize(DemuxerStream* demuxer_stream, bool* success, - Task* done_cb); - virtual void DoStop(Task* done_cb); - virtual void DoSeek(base::TimeDelta time, Task* done_cb); - virtual void DoDecode(Buffer* input); + // MediaFilter implementation. + virtual void Stop(FilterCallback* callback); + virtual void Seek(base::TimeDelta time, FilterCallback* callback); + virtual void Flush(FilterCallback* callback); - protected: - virtual void OnEmptyBufferDone(scoped_refptr<Buffer> buffer); - virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame); + // Decoder implementation. + virtual void Initialize(DemuxerStream* demuxer_stream, + FilterCallback* callback); + virtual const MediaFormat& media_format() { return media_format_; } + virtual void FillThisBuffer(scoped_refptr<VideoFrame> video_frame); + virtual bool ProvidesBuffer(); private: friend class FilterFactoryImpl1<FFmpegVideoDecoder, VideoDecodeEngine*>; @@ -58,26 +61,29 @@ class FFmpegVideoDecoder : public DecoderBase<VideoDecoder, VideoFrame> { }; enum DecoderState { + kUnInitialized, kNormal, kFlushCodec, kDecodeFinished, + kStopped }; - // Implement DecoderBase template methods. - virtual void EnqueueVideoFrame(const scoped_refptr<VideoFrame>& video_frame); + void OnInitializeComplete(FilterCallback* done_cb); + void OnStopComplete(FilterCallback* callback); + void OnFlushComplete(FilterCallback* callback); + void OnSeekComplete(FilterCallback* callback); + void OnReadComplete(Buffer* buffer); - // Create an empty video frame and queue it. 
- virtual void EnqueueEmptyFrame(); + // TODO(jiesun): until demuxer pass scoped_refptr<Buffer>: we could not merge + // this with OnReadComplete + void OnReadCompleteTask(scoped_refptr<Buffer> buffer); - // Methods that pickup after the decode engine has finished its action. - virtual void OnInitializeComplete(bool* success /* Not owned */, - Task* done_cb); - - virtual void OnDecodeComplete(scoped_refptr<VideoFrame> video_frame); + virtual void OnEngineEmptyBufferDone(scoped_refptr<Buffer> buffer); + virtual void OnEngineFillBufferDone(scoped_refptr<VideoFrame> video_frame); // Attempt to get the PTS and Duration for this frame by examining the time // info provided via packet stream (stored in |pts_heap|), or the info - // writen into the AVFrame itself. If no data is available in either, then + // written into the AVFrame itself. If no data is available in either, then // attempt to generate a best guess of the pts based on the last known pts. // // Data inside the AVFrame (if provided) is trusted the most, followed @@ -88,16 +94,13 @@ class FFmpegVideoDecoder : public DecoderBase<VideoDecoder, VideoFrame> { const TimeTuple& last_pts, const VideoFrame* frame); - // Signals the pipeline that a decode error occurs, and moves the decoder - // into the kDecodeFinished state. - virtual void SignalPipelineError(); - // Injection point for unittest to provide a mock engine. Takes ownership of // the provided engine. virtual void SetVideoDecodeEngineForTest(VideoDecodeEngine* engine); size_t width_; size_t height_; + MediaFormat media_format_; PtsHeap pts_heap_; // Heap of presentation timestamps. TimeTuple last_pts_; @@ -105,6 +108,15 @@ class FFmpegVideoDecoder : public DecoderBase<VideoDecoder, VideoFrame> { DecoderState state_; scoped_ptr<VideoDecodeEngine> decode_engine_; + // Tracks the number of asynchronous reads issued to |demuxer_stream_|. + // Using size_t since it is always compared against deque::size(). + size_t pending_reads_; + // Tracks the number of asynchronous reads issued from renderer. + size_t pending_requests_; + + // Pointer to the demuxer stream that will feed us compressed buffers. 
+ scoped_refptr<DemuxerStream> demuxer_stream_; + DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecoder); }; diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc index 228d3c8..cff038e 100644 --- a/media/filters/ffmpeg_video_decoder_unittest.cc +++ b/media/filters/ffmpeg_video_decoder_unittest.cc @@ -59,8 +59,10 @@ class MockVideoDecodeEngine : public VideoDecodeEngine { MOCK_METHOD1(Stop, void(Task* done_cb)); MOCK_METHOD1(Pause, void(Task* done_cb)); MOCK_METHOD1(Flush, void(Task* done_cb)); + MOCK_METHOD1(Seek, void(Task* done_cb)); MOCK_CONST_METHOD0(state, State()); MOCK_CONST_METHOD0(GetSurfaceFormat, VideoFrame::Format()); + MOCK_CONST_METHOD0(ProvidesBuffer, bool()); scoped_ptr<FillThisBufferCallback> fill_buffer_callback_; scoped_ptr<EmptyThisBufferCallback> empty_buffer_callback_; @@ -73,23 +75,26 @@ class DecoderPrivateMock : public FFmpegVideoDecoder { : FFmpegVideoDecoder(engine) { } - MOCK_METHOD1(EnqueueVideoFrame, - void(const scoped_refptr<VideoFrame>& video_frame)); - MOCK_METHOD0(EnqueueEmptyFrame, void()); - MOCK_METHOD4(FindPtsAndDuration, TimeTuple( - const AVRational& time_base, - PtsHeap* pts_heap, - const TimeTuple& last_pts, - const VideoFrame* frame)); - MOCK_METHOD0(SignalPipelineError, void()); - MOCK_METHOD1(OnEmptyBufferDone, void(scoped_refptr<Buffer> buffer)); - // change access qualifier for test: used in actions. - void OnDecodeComplete(scoped_refptr<VideoFrame> video_frame) { - FFmpegVideoDecoder::OnDecodeComplete(video_frame); + void OnEngineEmptyBufferDone(scoped_refptr<Buffer> buffer) { + FFmpegVideoDecoder::OnEngineEmptyBufferDone(buffer); + } + void OnEngineFillBufferDone(scoped_refptr<VideoFrame> frame) { + FFmpegVideoDecoder::OnEngineFillBufferDone(frame); + } + void OnReadComplete(Buffer* buffer) { + FFmpegVideoDecoder::OnReadComplete(buffer); } }; +ACTION_P(SaveFillCallback, engine) { + engine->fill_buffer_callback_.reset(arg3); +} + +ACTION_P(SaveEmptyCallback, engine) { + engine->empty_buffer_callback_.reset(arg2); +} + // Fixture class to facilitate writing tests. Takes care of setting up the // FFmpeg, pipeline and filter host mocks. class FFmpegVideoDecoderTest : public testing::Test { @@ -98,6 +103,7 @@ class FFmpegVideoDecoderTest : public testing::Test { static const int kHeight; static const FFmpegVideoDecoder::TimeTuple kTestPts1; static const FFmpegVideoDecoder::TimeTuple kTestPts2; + static const FFmpegVideoDecoder::TimeTuple kTestPts3; FFmpegVideoDecoderTest() { MediaFormat media_format; @@ -107,7 +113,8 @@ class FFmpegVideoDecoderTest : public testing::Test { // // TODO(ajwong): Break the test's dependency on FFmpegVideoDecoder. factory_ = FFmpegVideoDecoder::CreateFactory(); - decoder_ = factory_->Create<FFmpegVideoDecoder>(media_format); + decoder_ = factory_->Create<DecoderPrivateMock>(media_format); + renderer_ = new MockVideoRenderer(); engine_ = new StrictMock<MockVideoDecodeEngine>(); DCHECK(decoder_); @@ -130,6 +137,8 @@ class FFmpegVideoDecoderTest : public testing::Test { stream_.codec = &codec_context_; codec_context_.width = kWidth; codec_context_.height = kHeight; + stream_.r_frame_rate.num = 1; + stream_.r_frame_rate.den = 1; buffer_ = new DataBuffer(1); end_of_stream_buffer_ = new DataBuffer(0); @@ -152,10 +161,34 @@ class FFmpegVideoDecoderTest : public testing::Test { MockFFmpeg::set(NULL); } + void InitializeDecoderSuccessfully() { + // Test successful initialization. 
+ AVStreamProvider* av_stream_provider = demuxer_; + EXPECT_CALL(*demuxer_, QueryInterface(AVStreamProvider::interface_id())) + .WillOnce(Return(av_stream_provider)); + EXPECT_CALL(*demuxer_, GetAVStream()) + .WillOnce(Return(&stream_)); + + EXPECT_CALL(*engine_, Initialize(_, _, _, _, _)) + .WillOnce(DoAll(SaveFillCallback(engine_), + SaveEmptyCallback(engine_), + WithArg<4>(InvokeRunnable()))); + EXPECT_CALL(*engine_, state()) + .WillOnce(Return(VideoDecodeEngine::kNormal)); + EXPECT_CALL(*engine_, GetSurfaceFormat()) + .WillOnce(Return(VideoFrame::YV12)); + + EXPECT_CALL(callback_, OnFilterCallback()); + EXPECT_CALL(callback_, OnCallbackDestroyed()); + + decoder_->Initialize(demuxer_, callback_.NewCallback()); + message_loop_.RunAllPending(); + } // Fixture members. scoped_refptr<FilterFactory> factory_; MockVideoDecodeEngine* engine_; // Owned by |decoder_|. - scoped_refptr<FFmpegVideoDecoder> decoder_; + scoped_refptr<DecoderPrivateMock> decoder_; + scoped_refptr<MockVideoRenderer> renderer_; scoped_refptr<StrictMock<MockFFmpegDemuxerStream> > demuxer_; scoped_refptr<DataBuffer> buffer_; scoped_refptr<DataBuffer> end_of_stream_buffer_; @@ -183,6 +216,9 @@ const FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoderTest::kTestPts1 = const FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoderTest::kTestPts2 = { base::TimeDelta::FromMicroseconds(456), base::TimeDelta::FromMicroseconds(60) }; +const FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoderTest::kTestPts3 = + { base::TimeDelta::FromMicroseconds(789), + base::TimeDelta::FromMicroseconds(60) }; TEST(FFmpegVideoDecoderFactoryTest, Create) { // Should only accept video/x-ffmpeg mime type. @@ -208,19 +244,13 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_QueryInterfaceFails) { EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE)); EXPECT_CALL(callback_, OnFilterCallback()); EXPECT_CALL(callback_, OnCallbackDestroyed()); + EXPECT_CALL(*engine_, state()) + .WillOnce(Return(VideoDecodeEngine::kCreated)); decoder_->Initialize(demuxer_, callback_.NewCallback()); message_loop_.RunAllPending(); } -ACTION_P(SaveFillCallback, engine) { - engine->fill_buffer_callback_.reset(arg3); -} - -ACTION_P(SaveEmptyCallback, engine) { - engine->empty_buffer_callback_.reset(arg2); -} - TEST_F(FFmpegVideoDecoderTest, Initialize_EngineFails) { // Test successful initialization. AVStreamProvider* av_stream_provider = demuxer_; @@ -246,27 +276,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_EngineFails) { } TEST_F(FFmpegVideoDecoderTest, Initialize_Successful) { - // Test successful initialization. - AVStreamProvider* av_stream_provider = demuxer_; - EXPECT_CALL(*demuxer_, QueryInterface(AVStreamProvider::interface_id())) - .WillOnce(Return(av_stream_provider)); - EXPECT_CALL(*demuxer_, GetAVStream()) - .WillOnce(Return(&stream_)); - - EXPECT_CALL(*engine_, Initialize(_, _, _, _, _)) - .WillOnce(DoAll(SaveFillCallback(engine_), - SaveEmptyCallback(engine_), - WithArg<4>(InvokeRunnable()))); - EXPECT_CALL(*engine_, state()) - .WillOnce(Return(VideoDecodeEngine::kNormal)); - EXPECT_CALL(*engine_, GetSurfaceFormat()) - .WillOnce(Return(VideoFrame::YV12)); - - EXPECT_CALL(callback_, OnFilterCallback()); - EXPECT_CALL(callback_, OnCallbackDestroyed()); - - decoder_->Initialize(demuxer_, callback_.NewCallback()); - message_loop_.RunAllPending(); + InitializeDecoderSuccessfully(); // Test that the output media format is an uncompressed video surface that // matches the dimensions specified by FFmpeg. 
@@ -355,13 +365,26 @@ TEST_F(FFmpegVideoDecoderTest, FindPtsAndDuration) { EXPECT_EQ(789, result_pts.duration.InMicroseconds()); } -ACTION_P2(DecodeComplete, decoder, video_frame) { - decoder->OnDecodeComplete(video_frame); +ACTION_P2(ReadFromDemux, decoder, buffer) { + decoder->OnEngineEmptyBufferDone(buffer); +} + +ACTION_P3(ReturnFromDemux, decoder, buffer, time_tuple) { + delete arg0; + buffer->SetTimestamp(time_tuple.timestamp); + buffer->SetDuration(time_tuple.duration); + decoder->OnReadComplete(buffer); } -ACTION_P2(DecodeNotComplete, decoder, video_frame) { +ACTION_P3(DecodeComplete, decoder, video_frame, time_tuple) { + video_frame->SetTimestamp(time_tuple.timestamp); + video_frame->SetDuration(time_tuple.duration); + decoder->OnEngineFillBufferDone(video_frame); +} +ACTION_P2(DecodeNotComplete, decoder, buffer) { scoped_refptr<VideoFrame> null_frame; - decoder->OnDecodeComplete(null_frame); + decoder->OnEngineFillBufferDone(null_frame); + decoder->OnEngineEmptyBufferDone(buffer); } ACTION_P(ConsumePTS, pts_heap) { @@ -379,100 +402,80 @@ TEST_F(FFmpegVideoDecoderTest, DoDecode_TestStateTransition) { // 4) kDecodeFinished is never left regardless of what kind of buffer is // given. // 5) All state transitions happen as expected. - MockVideoDecodeEngine* mock_engine = new StrictMock<MockVideoDecodeEngine>(); - scoped_refptr<DecoderPrivateMock> mock_decoder = - new StrictMock<DecoderPrivateMock>(mock_engine); - mock_decoder->set_message_loop(&message_loop_); + InitializeDecoderSuccessfully(); - // Setup decoder to buffer one frame, decode one frame, fail one frame, - // decode one more, and then fail the last one to end decoding. - EXPECT_CALL(*mock_engine, EmptyThisBuffer(_)) - .WillOnce(DecodeNotComplete(mock_decoder.get(), video_frame_)) - .WillOnce(DecodeComplete(mock_decoder.get(), video_frame_)) - .WillOnce(DecodeNotComplete(mock_decoder.get(), video_frame_)) - .WillOnce(DecodeComplete(mock_decoder.get(), video_frame_)) - .WillOnce(DecodeComplete(mock_decoder.get(), video_frame_)) - .WillOnce(DecodeNotComplete(mock_decoder.get(), video_frame_)); - PtsHeap* pts_heap = &mock_decoder->pts_heap_; - EXPECT_CALL(*mock_decoder, FindPtsAndDuration(_, _, _, _)) - .WillOnce(DoAll(ConsumePTS(pts_heap), Return(kTestPts1))) - .WillOnce(DoAll(ConsumePTS(pts_heap), Return(kTestPts2))) - .WillOnce(DoAll(ConsumePTS(pts_heap), Return(kTestPts1))); - EXPECT_CALL(*mock_decoder, EnqueueVideoFrame(_)) - .Times(3); - EXPECT_CALL(*mock_decoder, EnqueueEmptyFrame()) - .Times(1); - EXPECT_CALL(*mock_decoder, OnEmptyBufferDone(_)) - .Times(6); + decoder_->set_fill_buffer_done_callback( + NewCallback(renderer_.get(), &MockVideoRenderer::FillThisBufferDone)); // Setup initial state and check that it is sane. - ASSERT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_); - ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.timestamp); - ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.duration); - // Decode once, which should simulate a buffering call. - mock_decoder->DoDecode(buffer_); - EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_); - ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.timestamp); - ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.duration); - EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty()); - - // Decode a second time, which should yield the first frame. 
- mock_decoder->DoDecode(buffer_); - EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_); - EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp); - EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration); - EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty()); - - // Decode a third time, with a regular buffer. The decode will error - // out, but the state should be the same. - mock_decoder->DoDecode(buffer_); - EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_); - EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp); - EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration); - EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty()); - - // Decode a fourth time, with an end of stream buffer. This should - // yield the second frame, and stay in flushing mode. - mock_decoder->DoDecode(end_of_stream_buffer_); - EXPECT_EQ(FFmpegVideoDecoder::kFlushCodec, mock_decoder->state_); - EXPECT_TRUE(kTestPts2.timestamp == mock_decoder->last_pts_.timestamp); - EXPECT_TRUE(kTestPts2.duration == mock_decoder->last_pts_.duration); - EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty()); - - // Decode a fifth time with an end of stream buffer. this should - // yield the third frame. - mock_decoder->DoDecode(end_of_stream_buffer_); - EXPECT_EQ(FFmpegVideoDecoder::kFlushCodec, mock_decoder->state_); - EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp); - EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration); - EXPECT_TRUE(mock_decoder->pts_heap_.IsEmpty()); - - // Decode a sixth time with an end of stream buffer. This should - // Move into kDecodeFinished. - mock_decoder->DoDecode(end_of_stream_buffer_); - EXPECT_EQ(FFmpegVideoDecoder::kDecodeFinished, mock_decoder->state_); - EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp); - EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration); - EXPECT_TRUE(mock_decoder->pts_heap_.IsEmpty()); -} - -TEST_F(FFmpegVideoDecoderTest, DoDecode_FinishEnqueuesEmptyFrames) { - MockVideoDecodeEngine* mock_engine = new StrictMock<MockVideoDecodeEngine>(); - scoped_refptr<DecoderPrivateMock> mock_decoder = - new StrictMock<DecoderPrivateMock>(mock_engine); - - // Move the decoder into the finished state for this test. - mock_decoder->state_ = FFmpegVideoDecoder::kDecodeFinished; - - // Expect 2 calls, make two calls. If kDecodeFinished is set, the buffer is - // not even examined. - EXPECT_CALL(*mock_decoder, EnqueueEmptyFrame()).Times(3); - EXPECT_CALL(*mock_decoder, OnEmptyBufferDone(_)).Times(3); + ASSERT_EQ(FFmpegVideoDecoder::kNormal, decoder_->state_); + ASSERT_TRUE(base::TimeDelta() == decoder_->last_pts_.timestamp); + ASSERT_TRUE(base::TimeDelta() == decoder_->last_pts_.duration); - mock_decoder->DoDecode(NULL); - mock_decoder->DoDecode(buffer_); - mock_decoder->DoDecode(end_of_stream_buffer_); - EXPECT_EQ(FFmpegVideoDecoder::kDecodeFinished, mock_decoder->state_); + // Setup decoder to buffer one frame, decode one frame, fail one frame, + // decode one more, and then fail the last one to end decoding. 
+ EXPECT_CALL(*engine_, FillThisBuffer(_)) + .Times(4) + .WillRepeatedly(ReadFromDemux(decoder_.get(), buffer_)); + EXPECT_CALL(*demuxer_.get(), Read(_)) + .Times(6) + .WillOnce(ReturnFromDemux(decoder_.get(), buffer_, kTestPts1)) + .WillOnce(ReturnFromDemux(decoder_.get(), buffer_, kTestPts3)) + .WillOnce(ReturnFromDemux(decoder_.get(), buffer_, kTestPts2)) + .WillOnce(ReturnFromDemux(decoder_.get(), + end_of_stream_buffer_, kTestPts3)) + .WillOnce(ReturnFromDemux(decoder_.get(), + end_of_stream_buffer_, kTestPts3)) + .WillOnce(ReturnFromDemux(decoder_.get(), + end_of_stream_buffer_, kTestPts3)); + EXPECT_CALL(*engine_, EmptyThisBuffer(_)) + .WillOnce(DecodeNotComplete(decoder_.get(), buffer_)) + .WillOnce(DecodeComplete(decoder_.get(), video_frame_, kTestPts1)) + .WillOnce(DecodeNotComplete(decoder_.get(), buffer_)) + .WillOnce(DecodeComplete(decoder_.get(), video_frame_, kTestPts2)) + .WillOnce(DecodeComplete(decoder_.get(), video_frame_, kTestPts3)) + .WillOnce(DecodeNotComplete(decoder_.get(), buffer_)); + EXPECT_CALL(*renderer_.get(), FillThisBufferDone(_)) + .Times(4); + + // First request from renderer: at first round decode engine did not produce + // any frame. Decoder will issue another read from demuxer. at second round + // decode engine will get a valid frame. + decoder_->FillThisBuffer(video_frame_); + message_loop_.RunAllPending(); + EXPECT_EQ(FFmpegVideoDecoder::kNormal, decoder_->state_); + ASSERT_TRUE(kTestPts1.timestamp == decoder_->last_pts_.timestamp); + ASSERT_TRUE(kTestPts1.duration == decoder_->last_pts_.duration); + EXPECT_FALSE(decoder_->pts_heap_.IsEmpty()); + + // Second request from renderer: at first round decode engine did not produce + // any frame. Decoder will issue another read from demuxer. at second round + // decode engine will get a valid frame. + decoder_->FillThisBuffer(video_frame_); + message_loop_.RunAllPending(); + EXPECT_EQ(FFmpegVideoDecoder::kFlushCodec, decoder_->state_); + EXPECT_TRUE(kTestPts2.timestamp == decoder_->last_pts_.timestamp); + EXPECT_TRUE(kTestPts2.duration == decoder_->last_pts_.duration); + EXPECT_FALSE(decoder_->pts_heap_.IsEmpty()); + + // Third request from renderer: decode engine will return frame on the + // first round. Input stream had reach EOS, therefore we had entered + // kFlushCodec state after this call. + decoder_->FillThisBuffer(video_frame_); + message_loop_.RunAllPending(); + EXPECT_EQ(FFmpegVideoDecoder::kFlushCodec, decoder_->state_); + EXPECT_TRUE(kTestPts3.timestamp == decoder_->last_pts_.timestamp); + EXPECT_TRUE(kTestPts3.duration == decoder_->last_pts_.duration); + EXPECT_TRUE(decoder_->pts_heap_.IsEmpty()); + + // Fourth request from renderer: Both input/output reach EOF. therefore + // we had reached the kDecodeFinished state after this call. 
+ decoder_->FillThisBuffer(video_frame_); + message_loop_.RunAllPending(); + EXPECT_EQ(FFmpegVideoDecoder::kDecodeFinished, decoder_->state_); + EXPECT_TRUE(kTestPts3.timestamp == decoder_->last_pts_.timestamp); + EXPECT_TRUE(kTestPts3.duration == decoder_->last_pts_.duration); + EXPECT_TRUE(decoder_->pts_heap_.IsEmpty()); } TEST_F(FFmpegVideoDecoderTest, DoSeek) { @@ -484,33 +487,47 @@ TEST_F(FFmpegVideoDecoderTest, DoSeek) { FFmpegVideoDecoder::kNormal, FFmpegVideoDecoder::kFlushCodec, FFmpegVideoDecoder::kDecodeFinished, + FFmpegVideoDecoder::kStopped, }; + InitializeDecoderSuccessfully(); + for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kStates); ++i) { SCOPED_TRACE(Message() << "Iteration " << i); - MockVideoDecodeEngine* mock_engine = - new StrictMock<MockVideoDecodeEngine>(); - scoped_refptr<DecoderPrivateMock> mock_decoder = - new StrictMock<DecoderPrivateMock>(mock_engine); - // Push in some timestamps. - mock_decoder->pts_heap_.Push(kTestPts1.timestamp); - mock_decoder->pts_heap_.Push(kTestPts2.timestamp); - mock_decoder->pts_heap_.Push(kTestPts1.timestamp); + decoder_->pts_heap_.Push(kTestPts1.timestamp); + decoder_->pts_heap_.Push(kTestPts2.timestamp); + decoder_->pts_heap_.Push(kTestPts3.timestamp); + + decoder_->state_ = kStates[i]; + + // Expect a pause. + // TODO(jiesun): call engine's Pause(). + StrictMock<MockFilterCallback> pause_done_cb; + EXPECT_CALL(pause_done_cb, OnFilterCallback()); + EXPECT_CALL(pause_done_cb, OnCallbackDestroyed()); + decoder_->Pause(pause_done_cb.NewCallback()); // Expect a flush. - mock_decoder->state_ = kStates[i]; - EXPECT_CALL(*mock_engine, Flush(_)) + EXPECT_CALL(*engine_, Flush(_)) + .WillOnce(WithArg<0>(InvokeRunnable())); + StrictMock<MockFilterCallback> flush_done_cb; + EXPECT_CALL(flush_done_cb, OnFilterCallback()); + EXPECT_CALL(flush_done_cb, OnCallbackDestroyed()); + decoder_->Flush(flush_done_cb.NewCallback()); + + // Expect Seek and verify the results. + EXPECT_CALL(*engine_, Seek(_)) .WillOnce(WithArg<0>(InvokeRunnable())); + StrictMock<MockFilterCallback> seek_done_cb; + EXPECT_CALL(seek_done_cb, OnFilterCallback()); + EXPECT_CALL(seek_done_cb, OnCallbackDestroyed()); + decoder_->Seek(kZero, seek_done_cb.NewCallback()); - TaskMocker done_cb; - EXPECT_CALL(done_cb, Run()).Times(1); - // Seek and verify the results. - mock_decoder->DoSeek(kZero, done_cb.CreateTask()); - EXPECT_TRUE(mock_decoder->pts_heap_.IsEmpty()); - EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_); + EXPECT_TRUE(decoder_->pts_heap_.IsEmpty()); + EXPECT_EQ(FFmpegVideoDecoder::kNormal, decoder_->state_); } } diff --git a/media/filters/omx_video_decode_engine.cc b/media/filters/omx_video_decode_engine.cc index db5b402..5e26bfa 100644 --- a/media/filters/omx_video_decode_engine.cc +++ b/media/filters/omx_video_decode_engine.cc @@ -255,6 +255,17 @@ void OmxVideoDecodeEngine::PortFlushDone(int port) { ComponentFlushDone(); } +void OmxVideoDecodeEngine::Seek(Task* done_cb) { + message_loop_->PostTask(FROM_HERE, + NewRunnableMethod(this, &OmxVideoDecodeEngine::SeekTask, done_cb)); +} + +void OmxVideoDecodeEngine::SeekTask(Task* done_cb) { + DCHECK_EQ(message_loop_, MessageLoop::current()); + AutoTaskRunner done_runner(done_cb); + // TODO(jiesun): add real logic here. +} + VideoFrame::Format OmxVideoDecodeEngine::GetSurfaceFormat() const { // TODO(jiesun): Both OmxHeaderType and EGLImage surface type could have // different surface formats. 
@@ -671,6 +682,10 @@ void OmxVideoDecodeEngine::FillThisBuffer( } } +bool OmxVideoDecodeEngine::ProvidesBuffer() const { + return !uses_egl_image_; +} + // Reconfigure port void OmxVideoDecodeEngine::OnPortSettingsChangedRun(int port, OMX_INDEXTYPE index) { diff --git a/media/filters/omx_video_decode_engine.h b/media/filters/omx_video_decode_engine.h index 7c1644f..db2c461 100644 --- a/media/filters/omx_video_decode_engine.h +++ b/media/filters/omx_video_decode_engine.h @@ -44,9 +44,11 @@ class OmxVideoDecodeEngine : Task* done_cb); virtual void EmptyThisBuffer(scoped_refptr<Buffer> buffer); virtual void FillThisBuffer(scoped_refptr<VideoFrame> video_frame); + virtual bool ProvidesBuffer() const; virtual void Stop(Task* done_cb); virtual void Pause(Task* done_cb); virtual void Flush(Task* done_cb); + virtual void Seek(Task* done_cb); virtual VideoFrame::Format GetSurfaceFormat() const; virtual State state() const; @@ -100,6 +102,7 @@ class OmxVideoDecodeEngine : void StopTask(Task* task); void PauseTask(Task* task); void FlushTask(Task* task); + void SeekTask(Task* task); // Transition method sequence for initialization bool CreateComponent(); diff --git a/media/filters/omx_video_decoder.cc b/media/filters/omx_video_decoder.cc index f36a305..3617ba3 100644 --- a/media/filters/omx_video_decoder.cc +++ b/media/filters/omx_video_decoder.cc @@ -72,6 +72,11 @@ void OmxVideoDecoder::FillThisBuffer(scoped_refptr<VideoFrame> frame) { &OmxVideoDecodeEngine::FillThisBuffer, frame)); } +bool OmxVideoDecoder::ProvidesBuffer() { + if (!omx_engine_.get()) return false; + return omx_engine_->ProvidesBuffer(); +} + void OmxVideoDecoder::Stop(FilterCallback* callback) { omx_engine_->Stop( NewRunnableMethod(this, @@ -92,9 +97,19 @@ void OmxVideoDecoder::PauseCompleteTask(FilterCallback* callback) { AutoCallbackRunner done_runner(callback); } +void OmxVideoDecoder::Flush(FilterCallback* callback) { + omx_engine_->Flush( + NewRunnableMethod(this, + &OmxVideoDecoder::FlushCompleteTask, callback)); +} + +void OmxVideoDecoder::FlushCompleteTask(FilterCallback* callback) { + AutoCallbackRunner done_runner(callback); +} + void OmxVideoDecoder::Seek(base::TimeDelta time, FilterCallback* callback) { - omx_engine_->Flush( + omx_engine_->Seek( NewRunnableMethod(this, &OmxVideoDecoder::SeekCompleteTask, callback)); } diff --git a/media/filters/omx_video_decoder.h b/media/filters/omx_video_decoder.h index 5f747ad..7699514 100644 --- a/media/filters/omx_video_decoder.h +++ b/media/filters/omx_video_decoder.h @@ -30,8 +30,10 @@ class OmxVideoDecoder : public VideoDecoder { virtual void Initialize(DemuxerStream* stream, FilterCallback* callback); virtual void Stop(FilterCallback* callback); virtual void Pause(FilterCallback* callback); + virtual void Flush(FilterCallback* callback); virtual void Seek(base::TimeDelta time, FilterCallback* callback); virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame); + virtual bool ProvidesBuffer(); virtual const MediaFormat& media_format() { return media_format_; } private: @@ -47,6 +49,7 @@ class OmxVideoDecoder : public VideoDecoder { void StopCompleteTask(FilterCallback* callback); void PauseCompleteTask(FilterCallback* callback); + void FlushCompleteTask(FilterCallback* callback); void SeekCompleteTask(FilterCallback* callback); // TODO(hclam): This is very ugly that we keep reference instead of diff --git a/media/filters/video_decode_engine.h b/media/filters/video_decode_engine.h index 8dc9d25..3efe647 100644 --- a/media/filters/video_decode_engine.h +++ 
b/media/filters/video_decode_engine.h @@ -26,6 +26,7 @@ class VideoDecodeEngine { kCreated, kNormal, kStopped, + kFlushing, kError, }; @@ -65,6 +66,7 @@ class VideoDecodeEngine { virtual void Stop(Task* done_cb) = 0; virtual void Pause(Task* done_cb) = 0; + virtual void Seek(Task* done_cb) = 0; // Flushes the decode engine of any buffered input packets. virtual void Flush(Task* done_cb) = 0; @@ -73,6 +75,8 @@ class VideoDecodeEngine { // DecodeFrame(). virtual VideoFrame::Format GetSurfaceFormat() const = 0; + virtual bool ProvidesBuffer() const = 0; + // Returns the current state of the decode engine. virtual State state() const = 0; }; diff --git a/media/filters/video_renderer_base.cc b/media/filters/video_renderer_base.cc index 00cf9dc..166c992 100644 --- a/media/filters/video_renderer_base.cc +++ b/media/filters/video_renderer_base.cc @@ -4,6 +4,7 @@ #include "base/callback.h" #include "media/base/buffers.h" +#include "media/base/callback.h" #include "media/base/filter_host.h" #include "media/base/video_frame.h" #include "media/filters/video_renderer_base.h" @@ -90,7 +91,7 @@ bool VideoRendererBase::ParseMediaFormat( void VideoRendererBase::Play(FilterCallback* callback) { AutoLock auto_lock(lock_); - DCHECK_EQ(kPaused, state_); + DCHECK(kPaused == state_ || kFlushing == state_); scoped_ptr<FilterCallback> c(callback); state_ = kPlaying; callback->Run(); @@ -99,17 +100,24 @@ void VideoRendererBase::Play(FilterCallback* callback) { void VideoRendererBase::Pause(FilterCallback* callback) { AutoLock auto_lock(lock_); DCHECK(state_ == kPlaying || state_ == kEnded); - pause_callback_.reset(callback); + AutoCallbackRunner done_runner(callback); state_ = kPaused; +} + +void VideoRendererBase::Flush(FilterCallback* callback) { + DCHECK(state_ == kPaused); + + AutoLock auto_lock(lock_); + flush_callback_.reset(callback); + state_ = kFlushing; - // TODO(jiesun): currently we use Pause() to fulfill Flush(). // Filter is considered paused when we've finished all pending reads, which // implies all buffers are returned to owner in Decoder/Renderer. Renderer // is considered paused with one more contingency that |pending_paint_| is // false, such that no client of us is holding any reference to VideoFrame. if (pending_reads_ == 0 && pending_paint_ == false) { - pause_callback_->Run(); - pause_callback_.reset(); + flush_callback_->Run(); + flush_callback_.reset(); FlushBuffers(); } } @@ -147,7 +155,7 @@ void VideoRendererBase::SetPlaybackRate(float playback_rate) { void VideoRendererBase::Seek(base::TimeDelta time, FilterCallback* callback) { AutoLock auto_lock(lock_); - DCHECK_EQ(kPaused, state_); + DCHECK(kPaused == state_ || kFlushing == state_); DCHECK_EQ(0u, pending_reads_) << "Pending reads should have completed"; state_ = kSeeking; seek_callback_.reset(callback); @@ -242,8 +250,7 @@ void VideoRendererBase::ThreadMain() { if (state_ == kStopped) return; - if (state_ == kPaused || state_ == kSeeking || state_ == kEnded || - playback_rate_ == 0) { + if (state_ != kPlaying || playback_rate_ == 0) { remaining_time = kIdleTimeDelta; } else if (frames_queue_ready_.empty() || frames_queue_ready_.front()->IsEndOfStream()) { @@ -350,7 +357,7 @@ void VideoRendererBase::GetCurrentFrame(scoped_refptr<VideoFrame>* frame_out) { // We should have initialized and have the current frame. 
DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying || - state_ == kEnded); + state_ == kFlushing || state_ == kEnded); *frame_out = current_frame_; pending_paint_ = true; } @@ -368,11 +375,11 @@ void VideoRendererBase::PutCurrentFrame(scoped_refptr<VideoFrame> frame) { // frame is timed-out. We will wake up our main thread to advance the current // frame when this is true. frame_available_.Signal(); - if (state_ == kPaused && pending_reads_ == 0 && pause_callback_.get()) { + if (state_ == kFlushing && pending_reads_ == 0 && flush_callback_.get()) { // No more pending reads! We're now officially "paused". FlushBuffers(); - pause_callback_->Run(); - pause_callback_.reset(); + flush_callback_->Run(); + flush_callback_.reset(); } } @@ -387,7 +394,7 @@ void VideoRendererBase::OnFillBufferDone(scoped_refptr<VideoFrame> frame) { } DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying || - state_ == kEnded); + state_ == kFlushing || state_ == kEnded); DCHECK_GT(pending_reads_, 0u); --pending_reads_; @@ -405,7 +412,7 @@ void VideoRendererBase::OnFillBufferDone(scoped_refptr<VideoFrame> frame) { // Check for our preroll complete condition. if (state_ == kSeeking) { DCHECK(seek_callback_.get()); - if (frames_queue_ready_.size() == kMaxFrames) { + if (frames_queue_ready_.size() == kMaxFrames || frame->IsEndOfStream()) { // We're paused, so make sure we update |current_frame_| to represent // our new location. state_ = kPaused; @@ -413,18 +420,22 @@ void VideoRendererBase::OnFillBufferDone(scoped_refptr<VideoFrame> frame) { // Because we might remain paused (i.e., we were not playing before we // received a seek), we can't rely on ThreadMain() to notify the subclass // the frame has been updated. - current_frame_ = frames_queue_ready_.front(); - frames_queue_ready_.pop_front(); + scoped_refptr<VideoFrame> first_frame; + first_frame = frames_queue_ready_.front(); + if (!first_frame->IsEndOfStream()) { + frames_queue_ready_.pop_front(); + current_frame_ = first_frame; + } OnFrameAvailable(); seek_callback_->Run(); seek_callback_.reset(); } - } else if (state_ == kPaused && pending_reads_ == 0 && !pending_paint_) { + } else if (state_ == kFlushing && pending_reads_ == 0 && !pending_paint_) { // No more pending reads! We're now officially "paused". - if (pause_callback_.get()) { - pause_callback_->Run(); - pause_callback_.reset(); + if (flush_callback_.get()) { + flush_callback_->Run(); + flush_callback_.reset(); } } } @@ -450,7 +461,7 @@ void VideoRendererBase::FlushBuffers() { // We should never put EOF frame into "done queue". while (!frames_queue_ready_.empty()) { scoped_refptr<VideoFrame> video_frame = frames_queue_ready_.front(); - if (video_frame->IsEndOfStream()) { + if (!video_frame->IsEndOfStream()) { frames_queue_done_.push_back(video_frame); } frames_queue_ready_.pop_front(); diff --git a/media/filters/video_renderer_base.h b/media/filters/video_renderer_base.h index f8176f7..1aa42d2 100644 --- a/media/filters/video_renderer_base.h +++ b/media/filters/video_renderer_base.h @@ -46,6 +46,7 @@ class VideoRendererBase : public VideoRenderer, // MediaFilter implementation. 
virtual void Play(FilterCallback* callback); virtual void Pause(FilterCallback* callback); + virtual void Flush(FilterCallback* callback); virtual void Stop(FilterCallback* callback); virtual void SetPlaybackRate(float playback_rate); virtual void Seek(base::TimeDelta time, FilterCallback* callback); @@ -153,6 +154,7 @@ class VideoRendererBase : public VideoRenderer, enum State { kUninitialized, kPaused, + kFlushing, kSeeking, kPlaying, kEnded, @@ -178,7 +180,7 @@ class VideoRendererBase : public VideoRenderer, float playback_rate_; // Filter callbacks. - scoped_ptr<FilterCallback> pause_callback_; + scoped_ptr<FilterCallback> flush_callback_; scoped_ptr<FilterCallback> seek_callback_; base::TimeDelta seek_timestamp_; |