author | nsylvain@chromium.org <nsylvain@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-08-11 02:42:38 +0000
committer | nsylvain@chromium.org <nsylvain@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-08-11 02:42:38 +0000
commit | 1af19cfd7b1ef9924516dbe811db495315feaefe (patch)
tree | 4ef1b2ce540ab324c7fb40644711f3b44c960bcf /media
parent | 3ce9f01e0678bcdf044efeb871fef4179573a3bc (diff)
Revert 55603
video-seek-past-end-playing.html has been failing since this change
Original description:
Preparation for recycling buffers, patch 2 (see the interface sketch below the commit message):
1. Add ProvidesBuffer() to the Filter interface; not used yet.
2. Add a Flush stage to the pipeline; not used yet. The renderer's pause work moves into the renderer's Flush().
3. Merge decoder_base into ffmpeg_video_decoder, because decoder_base is also shared by the audio decoder.
Review URL: http://codereview.chromium.org/3030013
TBR=jiesun@google.com
Review URL: http://codereview.chromium.org/3122008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55659 0039d316-1c4b-4281-b951-d872f2087c98
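For orientation, here is a minimal C++ sketch of the two interface additions that this revert removes, reconstructed from the removed hunks in media/base/filters.h shown in the diff below: the Flush() stage on MediaFilter and the ProvidesBuffer() hook on VideoDecoder. It is illustrative only and omits the rest of both classes.

```cpp
// Illustrative sketch (not a complete header): the two additions from the
// original patch that this revert removes, as they appeared in
// media/base/filters.h.
class MediaFilter : public base::RefCountedThreadSafe<MediaFilter> {
 public:
  // The pipeline has been flushed. Filters should return buffers to their
  // owners. The default implementation simply runs and deletes |callback|.
  virtual void Flush(FilterCallback* callback) {
    DCHECK(callback);
    if (callback) {
      callback->Run();
      delete callback;
    }
  }
};

class VideoDecoder : public MediaFilter {
 public:
  // Indicates whether the decoder provides its own output buffer pool.
  virtual bool ProvidesBuffer() = 0;
};
```

The revert also drops the matching kFlushing states from PipelineImpl and VideoRendererBase, and removes Seek(Task*) from the decode engine interface, as the hunks below show.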
Diffstat (limited to 'media')
-rw-r--r-- | media/base/filters.h | 25
-rw-r--r-- | media/base/mock_filters.h | 2
-rw-r--r-- | media/base/pipeline_impl.cc | 19
-rw-r--r-- | media/base/pipeline_impl.h | 1
-rw-r--r-- | media/filters/ffmpeg_video_decode_engine.cc | 18
-rw-r--r-- | media/filters/ffmpeg_video_decode_engine.h | 7
-rw-r--r-- | media/filters/ffmpeg_video_decode_engine_unittest.cc | 53
-rw-r--r-- | media/filters/ffmpeg_video_decoder.cc | 229
-rw-r--r-- | media/filters/ffmpeg_video_decoder.h | 60
-rw-r--r-- | media/filters/ffmpeg_video_decoder_unittest.cc | 325
-rw-r--r-- | media/filters/omx_video_decode_engine.cc | 15
-rw-r--r-- | media/filters/omx_video_decode_engine.h | 3
-rw-r--r-- | media/filters/omx_video_decoder.cc | 17
-rw-r--r-- | media/filters/omx_video_decoder.h | 3
-rw-r--r-- | media/filters/video_decode_engine.h | 4
-rw-r--r-- | media/filters/video_renderer_base.cc | 53
-rw-r--r-- | media/filters/video_renderer_base.h | 4
17 files changed, 298 insertions(+), 540 deletions(-)
diff --git a/media/base/filters.h b/media/base/filters.h index e9d2c27..8879233 100644 --- a/media/base/filters.h +++ b/media/base/filters.h @@ -92,7 +92,7 @@ class MediaFilter : public base::RefCountedThreadSafe<MediaFilter> { // The pipeline has resumed playback. Filters can continue requesting reads. // Filters may implement this method if they need to respond to this call. - // TODO(boliu): Check that callback is not NULL in subclasses. + // TODO(boliu): Check that callback is not NULL in sublcasses. virtual void Play(FilterCallback* callback) { DCHECK(callback); if (callback) { @@ -101,9 +101,10 @@ class MediaFilter : public base::RefCountedThreadSafe<MediaFilter> { } } - // The pipeline has paused playback. Filters should stop buffer exchange. - // Filters may implement this method if they need to respond to this call. - // TODO(boliu): Check that callback is not NULL in subclasses. + // The pipeline has paused playback. Filters should fulfill any existing read + // requests and then idle. Filters may implement this method if they need to + // respond to this call. + // TODO(boliu): Check that callback is not NULL in sublcasses. virtual void Pause(FilterCallback* callback) { DCHECK(callback); if (callback) { @@ -112,20 +113,9 @@ class MediaFilter : public base::RefCountedThreadSafe<MediaFilter> { } } - // The pipeline has been flushed. Filters should return buffer to owners. - // Filters may implement this method if they need to respond to this call. - // TODO(boliu): Check that callback is not NULL in subclasses. - virtual void Flush(FilterCallback* callback) { - DCHECK(callback); - if (callback) { - callback->Run(); - delete callback; - } - } - // The pipeline is being stopped either as a result of an error or because // the client called Stop(). - // TODO(boliu): Check that callback is not NULL in subclasses. + // TODO(boliu): Check that callback is not NULL in sublcasses. virtual void Stop(FilterCallback* callback) { DCHECK(callback); if (callback) { @@ -303,9 +293,6 @@ class VideoDecoder : public MediaFilter { // We could also pass empty pointer here to let decoder provide buffers pool. virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame) = 0; - // Indicate whether decoder provides its own output buffers - virtual bool ProvidesBuffer() = 0; - private: scoped_ptr<FillBufferDoneCallback> fill_buffer_done_callback_; }; diff --git a/media/base/mock_filters.h b/media/base/mock_filters.h index 7f1bdaf..ac9aab6 100644 --- a/media/base/mock_filters.h +++ b/media/base/mock_filters.h @@ -172,7 +172,6 @@ class MockVideoDecoder : public VideoDecoder { FilterCallback* callback)); MOCK_METHOD0(media_format, const MediaFormat&()); MOCK_METHOD1(FillThisBuffer, void(scoped_refptr<VideoFrame>)); - MOCK_METHOD0(ProvidesBuffer, bool()); protected: virtual ~MockVideoDecoder() {} @@ -223,7 +222,6 @@ class MockVideoRenderer : public VideoRenderer { MOCK_METHOD2(Initialize, void(VideoDecoder* decoder, FilterCallback* callback)); MOCK_METHOD0(HasEnded, bool()); - MOCK_METHOD1(FillThisBufferDone, void(scoped_refptr<VideoFrame> frame)); protected: virtual ~MockVideoRenderer() {} diff --git a/media/base/pipeline_impl.cc b/media/base/pipeline_impl.cc index d8b3192..e15492e 100644 --- a/media/base/pipeline_impl.cc +++ b/media/base/pipeline_impl.cc @@ -57,6 +57,19 @@ const char* GetThreadName() { } } +// Helper function used with NewRunnableMethod to implement a (very) crude +// blocking counter. +// +// TODO(scherkus): remove this as soon as Stop() is made asynchronous. 
+void DecrementCounter(Lock* lock, ConditionVariable* cond_var, int* count) { + AutoLock auto_lock(*lock); + --(*count); + CHECK(*count >= 0); + if (*count == 0) { + cond_var->Signal(); + } +} + } // namespace PipelineImpl::PipelineImpl(MessageLoop* message_loop) @@ -136,7 +149,6 @@ bool PipelineImpl::IsInitialized() const { AutoLock auto_lock(lock_); switch (state_) { case kPausing: - case kFlushing: case kSeeking: case kStarting: case kStarted: @@ -370,7 +382,6 @@ void PipelineImpl::FinishInitialization() { // static bool PipelineImpl::TransientState(State state) { return state == kPausing || - state == kFlushing || state == kSeeking || state == kStarting || state == kStopping; @@ -380,8 +391,6 @@ bool PipelineImpl::TransientState(State state) { PipelineImpl::State PipelineImpl::FindNextState(State current) { // TODO(scherkus): refactor InitializeTask() to make use of this function. if (current == kPausing) - return kFlushing; - if (current == kFlushing) return kSeeking; if (current == kSeeking) return kStarting; @@ -833,8 +842,6 @@ void PipelineImpl::FilterStateTransitionTask() { MediaFilter* filter = filters_[filters_.size() - remaining_transitions_]; if (state_ == kPausing) { filter->Pause(NewCallback(this, &PipelineImpl::OnFilterStateTransition)); - } else if (state_ == kFlushing) { - filter->Flush(NewCallback(this, &PipelineImpl::OnFilterStateTransition)); } else if (state_ == kSeeking) { filter->Seek(seek_timestamp_, NewCallback(this, &PipelineImpl::OnFilterStateTransition)); diff --git a/media/base/pipeline_impl.h b/media/base/pipeline_impl.h index 2486f0a..a590b47 100644 --- a/media/base/pipeline_impl.h +++ b/media/base/pipeline_impl.h @@ -118,7 +118,6 @@ class PipelineImpl : public Pipeline, public FilterHost { kInitVideoRenderer, kPausing, kSeeking, - kFlushing, kStarting, kStarted, kEnded, diff --git a/media/filters/ffmpeg_video_decode_engine.cc b/media/filters/ffmpeg_video_decode_engine.cc index 0454eb4..6c5b16f 100644 --- a/media/filters/ffmpeg_video_decode_engine.cc +++ b/media/filters/ffmpeg_video_decode_engine.cc @@ -119,15 +119,6 @@ void FFmpegVideoDecodeEngine::EmptyThisBuffer( DecodeFrame(buffer); } -void FFmpegVideoDecodeEngine::FillThisBuffer(scoped_refptr<VideoFrame> frame) { - scoped_refptr<Buffer> buffer; - empty_this_buffer_callback_->Run(buffer); -} - -bool FFmpegVideoDecodeEngine::ProvidesBuffer() const { - return false; -} - // Try to decode frame when both input and output are ready. void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) { scoped_refptr<VideoFrame> video_frame; @@ -166,10 +157,7 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) { // If frame_decoded == 0, then no frame was produced. if (frame_decoded == 0) { - if (buffer->IsEndOfStream()) // We had started flushing. - fill_this_buffer_callback_->Run(video_frame); - else - empty_this_buffer_callback_->Run(buffer); + fill_this_buffer_callback_->Run(video_frame); return; } @@ -246,10 +234,6 @@ void FFmpegVideoDecodeEngine::Flush(Task* done_cb) { avcodec_flush_buffers(codec_context_); } -void FFmpegVideoDecodeEngine::Seek(Task* done_cb) { - AutoTaskRunner done_runner(done_cb); -} - VideoFrame::Format FFmpegVideoDecodeEngine::GetSurfaceFormat() const { // J (Motion JPEG) versions of YUV are full range 0..255. // Regular (MPEG) YUV is 16..240. 
diff --git a/media/filters/ffmpeg_video_decode_engine.h b/media/filters/ffmpeg_video_decode_engine.h index 57d7fdf..3941c52 100644 --- a/media/filters/ffmpeg_video_decode_engine.h +++ b/media/filters/ffmpeg_video_decode_engine.h @@ -16,6 +16,9 @@ struct AVStream; namespace media { +class InputBuffer; +class OmxCodec; + class FFmpegVideoDecodeEngine : public VideoDecodeEngine { public: FFmpegVideoDecodeEngine(); @@ -28,12 +31,10 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine { FillThisBufferCallback* fill_buffer_callback, Task* done_cb); virtual void EmptyThisBuffer(scoped_refptr<Buffer> buffer); - virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame); - virtual bool ProvidesBuffer() const; + virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame) {} virtual void Stop(Task* done_cb); virtual void Pause(Task* done_cb); virtual void Flush(Task* done_cb); - virtual void Seek(Task* done_cb); virtual VideoFrame::Format GetSurfaceFormat() const; virtual State state() const { return state_; } diff --git a/media/filters/ffmpeg_video_decode_engine_unittest.cc b/media/filters/ffmpeg_video_decode_engine_unittest.cc index deb1305..d335b6e 100644 --- a/media/filters/ffmpeg_video_decode_engine_unittest.cc +++ b/media/filters/ffmpeg_video_decode_engine_unittest.cc @@ -55,13 +55,6 @@ class FFmpegVideoDecodeEngineTest : public testing::Test { test_engine_.reset(new FFmpegVideoDecodeEngine()); test_engine_->SetCodecContextForTest(&codec_context_); - - VideoFrame::CreateFrame(VideoFrame::YV12, - kWidth, - kHeight, - StreamSample::kInvalidTimestamp, - StreamSample::kInvalidTimestamp, - &video_frame_); } ~FFmpegVideoDecodeEngineTest() { @@ -86,17 +79,15 @@ class FFmpegVideoDecodeEngineTest : public testing::Test { test_engine_->Initialize( MessageLoop::current(), &stream_, - NewCallback(this, &FFmpegVideoDecodeEngineTest::OnEmptyBufferDone), - NewCallback(this, &FFmpegVideoDecodeEngineTest::OnFillBufferDone), + NULL, + NewCallback(this, &FFmpegVideoDecodeEngineTest::OnDecodeComplete), done_cb.CreateTask()); EXPECT_EQ(VideoDecodeEngine::kNormal, test_engine_->state()); } public: - MOCK_METHOD1(OnFillBufferDone, + MOCK_METHOD1(OnDecodeComplete, void(scoped_refptr<VideoFrame> video_frame)); - MOCK_METHOD1(OnEmptyBufferDone, - void(scoped_refptr<Buffer> buffer)); scoped_refptr<VideoFrame> video_frame_; protected: @@ -190,10 +181,6 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) { EXPECT_EQ(VideoDecodeEngine::kError, test_engine_->state()); } -ACTION_P2(DemuxComplete, engine, buffer) { - engine->EmptyThisBuffer(buffer); -} - ACTION_P(DecodeComplete, decoder) { decoder->video_frame_ = arg0; } @@ -215,11 +202,9 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_Normal) { .WillOnce(DoAll(SetArgumentPointee<2>(1), // Simulate 1 byte frame. Return(0))); - EXPECT_CALL(*this, OnEmptyBufferDone(_)) - .WillOnce(DemuxComplete(test_engine_.get(), buffer_)); - EXPECT_CALL(*this, OnFillBufferDone(_)) - .WillOnce(DecodeComplete(this)); - test_engine_->FillThisBuffer(video_frame_); + EXPECT_CALL(*this, OnDecodeComplete(_)) + .WillOnce(DecodeComplete(this)); + test_engine_->EmptyThisBuffer(buffer_); // |video_frame_| timestamp is 0 because we set the timestamp based off // the buffer timestamp. @@ -232,23 +217,16 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_0ByteFrame) { Initialize(); // Expect a bunch of avcodec calls. 
- EXPECT_CALL(mock_ffmpeg_, AVInitPacket(_)) - .Times(2); + EXPECT_CALL(mock_ffmpeg_, AVInitPacket(_)); EXPECT_CALL(mock_ffmpeg_, AVCodecDecodeVideo2(&codec_context_, &yuv_frame_, _, _)) .WillOnce(DoAll(SetArgumentPointee<2>(0), // Simulate 0 byte frame. - Return(0))) - .WillOnce(DoAll(SetArgumentPointee<2>(1), // Simulate 1 byte frame. Return(0))); - EXPECT_CALL(*this, OnEmptyBufferDone(_)) - .WillOnce(DemuxComplete(test_engine_.get(), buffer_)) - .WillOnce(DemuxComplete(test_engine_.get(), buffer_)); - EXPECT_CALL(*this, OnFillBufferDone(_)) - .WillOnce(DecodeComplete(this)); - test_engine_->FillThisBuffer(video_frame_); - - EXPECT_TRUE(video_frame_.get()); + EXPECT_CALL(*this, OnDecodeComplete(_)) + .WillOnce(DecodeComplete(this)); + test_engine_->EmptyThisBuffer(buffer_); + EXPECT_FALSE(video_frame_.get()); } TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_DecodeError) { @@ -260,12 +238,9 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_DecodeError) { AVCodecDecodeVideo2(&codec_context_, &yuv_frame_, _, _)) .WillOnce(Return(-1)); - EXPECT_CALL(*this, OnEmptyBufferDone(_)) - .WillOnce(DemuxComplete(test_engine_.get(), buffer_)); - EXPECT_CALL(*this, OnFillBufferDone(_)) - .WillOnce(DecodeComplete(this)); - test_engine_->FillThisBuffer(video_frame_); - + EXPECT_CALL(*this, OnDecodeComplete(_)) + .WillOnce(DecodeComplete(this)); + test_engine_->EmptyThisBuffer(buffer_); EXPECT_FALSE(video_frame_.get()); } diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc index 8cbbb13..4496505 100644 --- a/media/filters/ffmpeg_video_decoder.cc +++ b/media/filters/ffmpeg_video_decoder.cc @@ -7,9 +7,7 @@ #include <deque> #include "base/task.h" -#include "media/base/callback.h" #include "media/base/filters.h" -#include "media/base/filter_host.h" #include "media/base/limits.h" #include "media/base/media_format.h" #include "media/base/video_frame.h" @@ -25,35 +23,22 @@ FFmpegVideoDecoder::FFmpegVideoDecoder(VideoDecodeEngine* engine) : width_(0), height_(0), time_base_(new AVRational()), - state_(kUnInitialized), - decode_engine_(engine), - pending_reads_(0), - pending_requests_(0) { + state_(kNormal), + decode_engine_(engine) { } FFmpegVideoDecoder::~FFmpegVideoDecoder() { } -void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream, - FilterCallback* callback) { - if (MessageLoop::current() != message_loop()) { - message_loop()->PostTask(FROM_HERE, - NewRunnableMethod(this, - &FFmpegVideoDecoder::Initialize, - demuxer_stream, - callback)); - return; - } - - DCHECK_EQ(MessageLoop::current(), message_loop()); - DCHECK(!demuxer_stream_); - - demuxer_stream_ = demuxer_stream; +void FFmpegVideoDecoder::DoInitialize(DemuxerStream* demuxer_stream, + bool* success, + Task* done_cb) { + AutoTaskRunner done_runner(done_cb); + *success = false; // Get the AVStream by querying for the provider interface. 
AVStreamProvider* av_stream_provider; if (!demuxer_stream->QueryInterface(&av_stream_provider)) { - FFmpegVideoDecoder::OnInitializeComplete(callback); return; } AVStream* av_stream = av_stream_provider->GetAVStream(); @@ -68,27 +53,25 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream, if (width_ > Limits::kMaxDimension || height_ > Limits::kMaxDimension || (width_ * height_) > Limits::kMaxCanvas) { - FFmpegVideoDecoder::OnInitializeComplete(callback); return; } decode_engine_->Initialize( message_loop(), av_stream, - NewCallback(this, &FFmpegVideoDecoder::OnEngineEmptyBufferDone), - NewCallback(this, &FFmpegVideoDecoder::OnEngineFillBufferDone), + NewCallback(this, &FFmpegVideoDecoder::OnEmptyBufferDone), + NewCallback(this, &FFmpegVideoDecoder::OnDecodeComplete), NewRunnableMethod(this, &FFmpegVideoDecoder::OnInitializeComplete, - callback)); + success, + done_runner.release())); } -void FFmpegVideoDecoder::OnInitializeComplete(FilterCallback* callback) { - CHECK_EQ(MessageLoop::current(), message_loop()); +void FFmpegVideoDecoder::OnInitializeComplete(bool* success, Task* done_cb) { + AutoTaskRunner done_runner(done_cb); - AutoCallbackRunner done_runner(callback); - - bool success = decode_engine_->state() == VideoDecodeEngine::kNormal; - if (success) { + *success = decode_engine_->state() == VideoDecodeEngine::kNormal; + if (*success) { media_format_.SetAsString(MediaFormat::kMimeType, mime_type::kUncompressedVideo); media_format_.SetAsInteger(MediaFormat::kWidth, width_); @@ -99,99 +82,32 @@ void FFmpegVideoDecoder::OnInitializeComplete(FilterCallback* callback) { media_format_.SetAsInteger( MediaFormat::kSurfaceFormat, static_cast<int>(decode_engine_->GetSurfaceFormat())); - state_ = kNormal; - } else { - host()->SetError(PIPELINE_ERROR_DECODE); - } -} - -void FFmpegVideoDecoder::Stop(FilterCallback* callback) { - if (MessageLoop::current() != message_loop()) { - message_loop()->PostTask(FROM_HERE, - NewRunnableMethod(this, - &FFmpegVideoDecoder::Stop, - callback)); - return; } - - DCHECK_EQ(MessageLoop::current(), message_loop()); - - decode_engine_->Stop( - NewRunnableMethod(this, &FFmpegVideoDecoder::OnStopComplete, callback)); } -void FFmpegVideoDecoder::OnStopComplete(FilterCallback* callback) { - DCHECK_EQ(MessageLoop::current(), message_loop()); - - AutoCallbackRunner done_runner(callback); - state_ = kStopped; +void FFmpegVideoDecoder::DoStop(Task* done_cb) { + decode_engine_->Stop(done_cb); } -void FFmpegVideoDecoder::Flush(FilterCallback* callback) { - if (MessageLoop::current() != message_loop()) { - message_loop()->PostTask(FROM_HERE, - NewRunnableMethod(this, - &FFmpegVideoDecoder::Flush, - callback)); - return; - } - - DCHECK_EQ(MessageLoop::current(), message_loop()); - +void FFmpegVideoDecoder::DoSeek(base::TimeDelta time, Task* done_cb) { // Everything in the presentation time queue is invalid, clear the queue. 
while (!pts_heap_.IsEmpty()) pts_heap_.Pop(); - decode_engine_->Flush( - NewRunnableMethod(this, &FFmpegVideoDecoder::OnFlushComplete, callback)); -} - -void FFmpegVideoDecoder::OnFlushComplete(FilterCallback* callback) { - DCHECK_EQ(MessageLoop::current(), message_loop()); - - AutoCallbackRunner done_runner(callback); -} - -void FFmpegVideoDecoder::Seek(base::TimeDelta time, - FilterCallback* callback) { - if (MessageLoop::current() != message_loop()) { - message_loop()->PostTask(FROM_HERE, - NewRunnableMethod(this, - &FFmpegVideoDecoder::Seek, - time, - callback)); - return; - } - - DCHECK_EQ(MessageLoop::current(), message_loop()); - DCHECK_EQ(0u, pending_reads_) << "Pending reads should have completed"; - DCHECK_EQ(0u, pending_requests_) << "Pending requests should be empty"; - - decode_engine_->Seek( - NewRunnableMethod(this, &FFmpegVideoDecoder::OnSeekComplete, callback)); -} - -void FFmpegVideoDecoder::OnSeekComplete(FilterCallback* callback) { - DCHECK_EQ(MessageLoop::current(), message_loop()); - - AutoCallbackRunner done_runner(callback); + // We're back where we started. It should be completely safe to flush here + // since DecoderBase uses |expecting_discontinuous_| to verify that the next + // time DoDecode() is called we will have a discontinuous buffer. + // + // TODO(ajwong): Should we put a guard here to prevent leaving kError. state_ = kNormal; -} -void FFmpegVideoDecoder::OnReadComplete(Buffer* buffer_in) { - scoped_refptr<Buffer> buffer = buffer_in; - message_loop()->PostTask( - FROM_HERE, - NewRunnableMethod(this, - &FFmpegVideoDecoder::OnReadCompleteTask, - buffer)); + decode_engine_->Flush(done_cb); } -void FFmpegVideoDecoder::OnReadCompleteTask(scoped_refptr<Buffer> buffer) { - DCHECK_EQ(MessageLoop::current(), message_loop()); - DCHECK_GT(pending_reads_, 0u); - - --pending_reads_; +void FFmpegVideoDecoder::DoDecode(Buffer* buffer) { + // TODO(ajwong): This DoDecode() and OnDecodeComplete() set of functions is + // too complicated to easily unittest. The test becomes fragile. Try to + // find a way to reorganize into smaller units for testing. // During decode, because reads are issued asynchronously, it is possible to // receive multiple end of stream buffers since each read is acked. When the @@ -219,13 +135,9 @@ void FFmpegVideoDecoder::OnReadCompleteTask(scoped_refptr<Buffer> buffer) { // Any time buffer->IsDiscontinuous() is true. // // If the decoding is finished, we just always return empty frames. - if (state_ == kDecodeFinished || state_ == kStopped) { - DCHECK(buffer->IsEndOfStream()); - - // Signal VideoRenderer the end of the stream event. - scoped_refptr<VideoFrame> video_frame; - VideoFrame::CreateEmptyFrame(&video_frame); - fill_buffer_done_callback()->Run(video_frame); + if (state_ == kDecodeFinished) { + EnqueueEmptyFrame(); + OnEmptyBufferDone(NULL); return; } @@ -240,7 +152,7 @@ void FFmpegVideoDecoder::OnReadCompleteTask(scoped_refptr<Buffer> buffer) { // // TODO(ajwong): This push logic, along with the pop logic below needs to // be reevaluated to correctly handle decode errors. 
- if (state_ == kNormal && !buffer->IsEndOfStream() && + if (state_ == kNormal && buffer->GetTimestamp() != StreamSample::kInvalidTimestamp) { pts_heap_.Push(buffer->GetTimestamp()); } @@ -249,36 +161,8 @@ void FFmpegVideoDecoder::OnReadCompleteTask(scoped_refptr<Buffer> buffer) { decode_engine_->EmptyThisBuffer(buffer); } -void FFmpegVideoDecoder::FillThisBuffer( +void FFmpegVideoDecoder::OnDecodeComplete( scoped_refptr<VideoFrame> video_frame) { - if (MessageLoop::current() != message_loop()) { - message_loop()->PostTask( - FROM_HERE, - NewRunnableMethod(this, - &FFmpegVideoDecoder::FillThisBuffer, - video_frame)); - return; - } - - DCHECK_EQ(MessageLoop::current(), message_loop()); - - // Synchronized flushing before stop should prevent this. - if (state_ == kStopped) - return; // Discard the video frame. - - // Notify decode engine the available of new frame. - ++pending_requests_; - decode_engine_->FillThisBuffer(video_frame); -} - -void FFmpegVideoDecoder::OnEngineFillBufferDone( - scoped_refptr<VideoFrame> video_frame) { - DCHECK_EQ(MessageLoop::current(), message_loop()); - - // TODO(jiesun): Flush before stop will prevent this from happening. - if (state_ == kStopped) - return; // Discard the video frame. - if (video_frame.get()) { // If we actually got data back, enqueue a frame. last_pts_ = FindPtsAndDuration(*time_base_, &pts_heap_, last_pts_, @@ -286,34 +170,39 @@ void FFmpegVideoDecoder::OnEngineFillBufferDone( video_frame->SetTimestamp(last_pts_.timestamp); video_frame->SetDuration(last_pts_.duration); - - // Deliver this frame to VideoRenderer. - --pending_requests_; - fill_buffer_done_callback()->Run(video_frame); + EnqueueVideoFrame(video_frame); } else { // When in kFlushCodec, any errored decode, or a 0-lengthed frame, // is taken as a signal to stop decoding. if (state_ == kFlushCodec) { state_ = kDecodeFinished; - - // Signal VideoRenderer the end of the stream event. - scoped_refptr<VideoFrame> video_frame; - VideoFrame::CreateEmptyFrame(&video_frame); - fill_buffer_done_callback()->Run(video_frame); + EnqueueEmptyFrame(); } } + + OnEmptyBufferDone(NULL); } -void FFmpegVideoDecoder::OnEngineEmptyBufferDone( - scoped_refptr<Buffer> buffer) { - DCHECK_EQ(MessageLoop::current(), message_loop()); - DCHECK_LE(pending_reads_, pending_requests_); +void FFmpegVideoDecoder::OnEmptyBufferDone(scoped_refptr<Buffer> buffer) { + // Currently we just ignore the returned buffer. + DecoderBase<VideoDecoder, VideoFrame>::OnDecodeComplete(); +} - if (state_ != kDecodeFinished) { - demuxer_stream_->Read( - NewCallback(this, &FFmpegVideoDecoder::OnReadComplete)); - ++pending_reads_; - } +void FFmpegVideoDecoder::FillThisBuffer(scoped_refptr<VideoFrame> frame) { + DecoderBase<VideoDecoder, VideoFrame>::FillThisBuffer(frame); + // Notify decode engine the available of new frame. 
+ decode_engine_->FillThisBuffer(frame); +} + +void FFmpegVideoDecoder::EnqueueVideoFrame( + const scoped_refptr<VideoFrame>& video_frame) { + EnqueueResult(video_frame); +} + +void FFmpegVideoDecoder::EnqueueEmptyFrame() { + scoped_refptr<VideoFrame> video_frame; + VideoFrame::CreateEmptyFrame(&video_frame); + EnqueueResult(video_frame); } FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoder::FindPtsAndDuration( @@ -365,9 +254,9 @@ FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoder::FindPtsAndDuration( return pts; } -bool FFmpegVideoDecoder::ProvidesBuffer() { - if (!decode_engine_.get()) return false; - return decode_engine_->ProvidesBuffer(); +void FFmpegVideoDecoder::SignalPipelineError() { + host()->SetError(PIPELINE_ERROR_DECODE); + state_ = kDecodeFinished; } void FFmpegVideoDecoder::SetVideoDecodeEngineForTest( diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h index 92b6e78..65c4b75 100644 --- a/media/filters/ffmpeg_video_decoder.h +++ b/media/filters/ffmpeg_video_decoder.h @@ -7,8 +7,6 @@ #include "base/gtest_prod_util.h" #include "base/time.h" -#include "media/base/factory.h" -#include "media/base/filters.h" #include "media/base/pts_heap.h" #include "media/base/video_frame.h" #include "media/filters/decoder_base.h" @@ -20,7 +18,7 @@ namespace media { class VideoDecodeEngine; -class FFmpegVideoDecoder : public VideoDecoder { +class FFmpegVideoDecoder : public DecoderBase<VideoDecoder, VideoFrame> { public: explicit FFmpegVideoDecoder(VideoDecodeEngine* engine); virtual ~FFmpegVideoDecoder(); @@ -28,17 +26,16 @@ class FFmpegVideoDecoder : public VideoDecoder { static FilterFactory* CreateFactory(); static bool IsMediaFormatSupported(const MediaFormat& media_format); - // MediaFilter implementation. - virtual void Stop(FilterCallback* callback); - virtual void Seek(base::TimeDelta time, FilterCallback* callback); - virtual void Flush(FilterCallback* callback); + protected: + virtual void DoInitialize(DemuxerStream* demuxer_stream, bool* success, + Task* done_cb); + virtual void DoStop(Task* done_cb); + virtual void DoSeek(base::TimeDelta time, Task* done_cb); + virtual void DoDecode(Buffer* input); - // Decoder implementation. - virtual void Initialize(DemuxerStream* demuxer_stream, - FilterCallback* callback); - virtual const MediaFormat& media_format() { return media_format_; } - virtual void FillThisBuffer(scoped_refptr<VideoFrame> video_frame); - virtual bool ProvidesBuffer(); + protected: + virtual void OnEmptyBufferDone(scoped_refptr<Buffer> buffer); + virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame); private: friend class FilterFactoryImpl1<FFmpegVideoDecoder, VideoDecodeEngine*>; @@ -61,29 +58,26 @@ class FFmpegVideoDecoder : public VideoDecoder { }; enum DecoderState { - kUnInitialized, kNormal, kFlushCodec, kDecodeFinished, - kStopped }; - void OnInitializeComplete(FilterCallback* done_cb); - void OnStopComplete(FilterCallback* callback); - void OnFlushComplete(FilterCallback* callback); - void OnSeekComplete(FilterCallback* callback); - void OnReadComplete(Buffer* buffer); + // Implement DecoderBase template methods. + virtual void EnqueueVideoFrame(const scoped_refptr<VideoFrame>& video_frame); - // TODO(jiesun): until demuxer pass scoped_refptr<Buffer>: we could not merge - // this with OnReadComplete - void OnReadCompleteTask(scoped_refptr<Buffer> buffer); + // Create an empty video frame and queue it. 
+ virtual void EnqueueEmptyFrame(); - virtual void OnEngineEmptyBufferDone(scoped_refptr<Buffer> buffer); - virtual void OnEngineFillBufferDone(scoped_refptr<VideoFrame> video_frame); + // Methods that pickup after the decode engine has finished its action. + virtual void OnInitializeComplete(bool* success /* Not owned */, + Task* done_cb); + + virtual void OnDecodeComplete(scoped_refptr<VideoFrame> video_frame); // Attempt to get the PTS and Duration for this frame by examining the time // info provided via packet stream (stored in |pts_heap|), or the info - // written into the AVFrame itself. If no data is available in either, then + // writen into the AVFrame itself. If no data is available in either, then // attempt to generate a best guess of the pts based on the last known pts. // // Data inside the AVFrame (if provided) is trusted the most, followed @@ -94,13 +88,16 @@ class FFmpegVideoDecoder : public VideoDecoder { const TimeTuple& last_pts, const VideoFrame* frame); + // Signals the pipeline that a decode error occurs, and moves the decoder + // into the kDecodeFinished state. + virtual void SignalPipelineError(); + // Injection point for unittest to provide a mock engine. Takes ownership of // the provided engine. virtual void SetVideoDecodeEngineForTest(VideoDecodeEngine* engine); size_t width_; size_t height_; - MediaFormat media_format_; PtsHeap pts_heap_; // Heap of presentation timestamps. TimeTuple last_pts_; @@ -108,15 +105,6 @@ class FFmpegVideoDecoder : public VideoDecoder { DecoderState state_; scoped_ptr<VideoDecodeEngine> decode_engine_; - // Tracks the number of asynchronous reads issued to |demuxer_stream_|. - // Using size_t since it is always compared against deque::size(). - size_t pending_reads_; - // Tracks the number of asynchronous reads issued from renderer. - size_t pending_requests_; - - // Pointer to the demuxer stream that will feed us compressed buffers. - scoped_refptr<DemuxerStream> demuxer_stream_; - DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecoder); }; diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc index cff038e..228d3c8 100644 --- a/media/filters/ffmpeg_video_decoder_unittest.cc +++ b/media/filters/ffmpeg_video_decoder_unittest.cc @@ -59,10 +59,8 @@ class MockVideoDecodeEngine : public VideoDecodeEngine { MOCK_METHOD1(Stop, void(Task* done_cb)); MOCK_METHOD1(Pause, void(Task* done_cb)); MOCK_METHOD1(Flush, void(Task* done_cb)); - MOCK_METHOD1(Seek, void(Task* done_cb)); MOCK_CONST_METHOD0(state, State()); MOCK_CONST_METHOD0(GetSurfaceFormat, VideoFrame::Format()); - MOCK_CONST_METHOD0(ProvidesBuffer, bool()); scoped_ptr<FillThisBufferCallback> fill_buffer_callback_; scoped_ptr<EmptyThisBufferCallback> empty_buffer_callback_; @@ -75,26 +73,23 @@ class DecoderPrivateMock : public FFmpegVideoDecoder { : FFmpegVideoDecoder(engine) { } + MOCK_METHOD1(EnqueueVideoFrame, + void(const scoped_refptr<VideoFrame>& video_frame)); + MOCK_METHOD0(EnqueueEmptyFrame, void()); + MOCK_METHOD4(FindPtsAndDuration, TimeTuple( + const AVRational& time_base, + PtsHeap* pts_heap, + const TimeTuple& last_pts, + const VideoFrame* frame)); + MOCK_METHOD0(SignalPipelineError, void()); + MOCK_METHOD1(OnEmptyBufferDone, void(scoped_refptr<Buffer> buffer)); + // change access qualifier for test: used in actions. 
- void OnEngineEmptyBufferDone(scoped_refptr<Buffer> buffer) { - FFmpegVideoDecoder::OnEngineEmptyBufferDone(buffer); - } - void OnEngineFillBufferDone(scoped_refptr<VideoFrame> frame) { - FFmpegVideoDecoder::OnEngineFillBufferDone(frame); - } - void OnReadComplete(Buffer* buffer) { - FFmpegVideoDecoder::OnReadComplete(buffer); + void OnDecodeComplete(scoped_refptr<VideoFrame> video_frame) { + FFmpegVideoDecoder::OnDecodeComplete(video_frame); } }; -ACTION_P(SaveFillCallback, engine) { - engine->fill_buffer_callback_.reset(arg3); -} - -ACTION_P(SaveEmptyCallback, engine) { - engine->empty_buffer_callback_.reset(arg2); -} - // Fixture class to facilitate writing tests. Takes care of setting up the // FFmpeg, pipeline and filter host mocks. class FFmpegVideoDecoderTest : public testing::Test { @@ -103,7 +98,6 @@ class FFmpegVideoDecoderTest : public testing::Test { static const int kHeight; static const FFmpegVideoDecoder::TimeTuple kTestPts1; static const FFmpegVideoDecoder::TimeTuple kTestPts2; - static const FFmpegVideoDecoder::TimeTuple kTestPts3; FFmpegVideoDecoderTest() { MediaFormat media_format; @@ -113,8 +107,7 @@ class FFmpegVideoDecoderTest : public testing::Test { // // TODO(ajwong): Break the test's dependency on FFmpegVideoDecoder. factory_ = FFmpegVideoDecoder::CreateFactory(); - decoder_ = factory_->Create<DecoderPrivateMock>(media_format); - renderer_ = new MockVideoRenderer(); + decoder_ = factory_->Create<FFmpegVideoDecoder>(media_format); engine_ = new StrictMock<MockVideoDecodeEngine>(); DCHECK(decoder_); @@ -137,8 +130,6 @@ class FFmpegVideoDecoderTest : public testing::Test { stream_.codec = &codec_context_; codec_context_.width = kWidth; codec_context_.height = kHeight; - stream_.r_frame_rate.num = 1; - stream_.r_frame_rate.den = 1; buffer_ = new DataBuffer(1); end_of_stream_buffer_ = new DataBuffer(0); @@ -161,34 +152,10 @@ class FFmpegVideoDecoderTest : public testing::Test { MockFFmpeg::set(NULL); } - void InitializeDecoderSuccessfully() { - // Test successful initialization. - AVStreamProvider* av_stream_provider = demuxer_; - EXPECT_CALL(*demuxer_, QueryInterface(AVStreamProvider::interface_id())) - .WillOnce(Return(av_stream_provider)); - EXPECT_CALL(*demuxer_, GetAVStream()) - .WillOnce(Return(&stream_)); - - EXPECT_CALL(*engine_, Initialize(_, _, _, _, _)) - .WillOnce(DoAll(SaveFillCallback(engine_), - SaveEmptyCallback(engine_), - WithArg<4>(InvokeRunnable()))); - EXPECT_CALL(*engine_, state()) - .WillOnce(Return(VideoDecodeEngine::kNormal)); - EXPECT_CALL(*engine_, GetSurfaceFormat()) - .WillOnce(Return(VideoFrame::YV12)); - - EXPECT_CALL(callback_, OnFilterCallback()); - EXPECT_CALL(callback_, OnCallbackDestroyed()); - - decoder_->Initialize(demuxer_, callback_.NewCallback()); - message_loop_.RunAllPending(); - } // Fixture members. scoped_refptr<FilterFactory> factory_; MockVideoDecodeEngine* engine_; // Owned by |decoder_|. 
- scoped_refptr<DecoderPrivateMock> decoder_; - scoped_refptr<MockVideoRenderer> renderer_; + scoped_refptr<FFmpegVideoDecoder> decoder_; scoped_refptr<StrictMock<MockFFmpegDemuxerStream> > demuxer_; scoped_refptr<DataBuffer> buffer_; scoped_refptr<DataBuffer> end_of_stream_buffer_; @@ -216,9 +183,6 @@ const FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoderTest::kTestPts1 = const FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoderTest::kTestPts2 = { base::TimeDelta::FromMicroseconds(456), base::TimeDelta::FromMicroseconds(60) }; -const FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoderTest::kTestPts3 = - { base::TimeDelta::FromMicroseconds(789), - base::TimeDelta::FromMicroseconds(60) }; TEST(FFmpegVideoDecoderFactoryTest, Create) { // Should only accept video/x-ffmpeg mime type. @@ -244,13 +208,19 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_QueryInterfaceFails) { EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE)); EXPECT_CALL(callback_, OnFilterCallback()); EXPECT_CALL(callback_, OnCallbackDestroyed()); - EXPECT_CALL(*engine_, state()) - .WillOnce(Return(VideoDecodeEngine::kCreated)); decoder_->Initialize(demuxer_, callback_.NewCallback()); message_loop_.RunAllPending(); } +ACTION_P(SaveFillCallback, engine) { + engine->fill_buffer_callback_.reset(arg3); +} + +ACTION_P(SaveEmptyCallback, engine) { + engine->empty_buffer_callback_.reset(arg2); +} + TEST_F(FFmpegVideoDecoderTest, Initialize_EngineFails) { // Test successful initialization. AVStreamProvider* av_stream_provider = demuxer_; @@ -276,7 +246,27 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_EngineFails) { } TEST_F(FFmpegVideoDecoderTest, Initialize_Successful) { - InitializeDecoderSuccessfully(); + // Test successful initialization. + AVStreamProvider* av_stream_provider = demuxer_; + EXPECT_CALL(*demuxer_, QueryInterface(AVStreamProvider::interface_id())) + .WillOnce(Return(av_stream_provider)); + EXPECT_CALL(*demuxer_, GetAVStream()) + .WillOnce(Return(&stream_)); + + EXPECT_CALL(*engine_, Initialize(_, _, _, _, _)) + .WillOnce(DoAll(SaveFillCallback(engine_), + SaveEmptyCallback(engine_), + WithArg<4>(InvokeRunnable()))); + EXPECT_CALL(*engine_, state()) + .WillOnce(Return(VideoDecodeEngine::kNormal)); + EXPECT_CALL(*engine_, GetSurfaceFormat()) + .WillOnce(Return(VideoFrame::YV12)); + + EXPECT_CALL(callback_, OnFilterCallback()); + EXPECT_CALL(callback_, OnCallbackDestroyed()); + + decoder_->Initialize(demuxer_, callback_.NewCallback()); + message_loop_.RunAllPending(); // Test that the output media format is an uncompressed video surface that // matches the dimensions specified by FFmpeg. 
@@ -365,26 +355,13 @@ TEST_F(FFmpegVideoDecoderTest, FindPtsAndDuration) { EXPECT_EQ(789, result_pts.duration.InMicroseconds()); } -ACTION_P2(ReadFromDemux, decoder, buffer) { - decoder->OnEngineEmptyBufferDone(buffer); -} - -ACTION_P3(ReturnFromDemux, decoder, buffer, time_tuple) { - delete arg0; - buffer->SetTimestamp(time_tuple.timestamp); - buffer->SetDuration(time_tuple.duration); - decoder->OnReadComplete(buffer); +ACTION_P2(DecodeComplete, decoder, video_frame) { + decoder->OnDecodeComplete(video_frame); } -ACTION_P3(DecodeComplete, decoder, video_frame, time_tuple) { - video_frame->SetTimestamp(time_tuple.timestamp); - video_frame->SetDuration(time_tuple.duration); - decoder->OnEngineFillBufferDone(video_frame); -} -ACTION_P2(DecodeNotComplete, decoder, buffer) { +ACTION_P2(DecodeNotComplete, decoder, video_frame) { scoped_refptr<VideoFrame> null_frame; - decoder->OnEngineFillBufferDone(null_frame); - decoder->OnEngineEmptyBufferDone(buffer); + decoder->OnDecodeComplete(null_frame); } ACTION_P(ConsumePTS, pts_heap) { @@ -402,80 +379,100 @@ TEST_F(FFmpegVideoDecoderTest, DoDecode_TestStateTransition) { // 4) kDecodeFinished is never left regardless of what kind of buffer is // given. // 5) All state transitions happen as expected. - InitializeDecoderSuccessfully(); + MockVideoDecodeEngine* mock_engine = new StrictMock<MockVideoDecodeEngine>(); + scoped_refptr<DecoderPrivateMock> mock_decoder = + new StrictMock<DecoderPrivateMock>(mock_engine); + mock_decoder->set_message_loop(&message_loop_); - decoder_->set_fill_buffer_done_callback( - NewCallback(renderer_.get(), &MockVideoRenderer::FillThisBufferDone)); + // Setup decoder to buffer one frame, decode one frame, fail one frame, + // decode one more, and then fail the last one to end decoding. + EXPECT_CALL(*mock_engine, EmptyThisBuffer(_)) + .WillOnce(DecodeNotComplete(mock_decoder.get(), video_frame_)) + .WillOnce(DecodeComplete(mock_decoder.get(), video_frame_)) + .WillOnce(DecodeNotComplete(mock_decoder.get(), video_frame_)) + .WillOnce(DecodeComplete(mock_decoder.get(), video_frame_)) + .WillOnce(DecodeComplete(mock_decoder.get(), video_frame_)) + .WillOnce(DecodeNotComplete(mock_decoder.get(), video_frame_)); + PtsHeap* pts_heap = &mock_decoder->pts_heap_; + EXPECT_CALL(*mock_decoder, FindPtsAndDuration(_, _, _, _)) + .WillOnce(DoAll(ConsumePTS(pts_heap), Return(kTestPts1))) + .WillOnce(DoAll(ConsumePTS(pts_heap), Return(kTestPts2))) + .WillOnce(DoAll(ConsumePTS(pts_heap), Return(kTestPts1))); + EXPECT_CALL(*mock_decoder, EnqueueVideoFrame(_)) + .Times(3); + EXPECT_CALL(*mock_decoder, EnqueueEmptyFrame()) + .Times(1); + EXPECT_CALL(*mock_decoder, OnEmptyBufferDone(_)) + .Times(6); // Setup initial state and check that it is sane. - ASSERT_EQ(FFmpegVideoDecoder::kNormal, decoder_->state_); - ASSERT_TRUE(base::TimeDelta() == decoder_->last_pts_.timestamp); - ASSERT_TRUE(base::TimeDelta() == decoder_->last_pts_.duration); + ASSERT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_); + ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.timestamp); + ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.duration); + // Decode once, which should simulate a buffering call. + mock_decoder->DoDecode(buffer_); + EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_); + ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.timestamp); + ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.duration); + EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty()); + + // Decode a second time, which should yield the first frame. 
+ mock_decoder->DoDecode(buffer_); + EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_); + EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp); + EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration); + EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty()); + + // Decode a third time, with a regular buffer. The decode will error + // out, but the state should be the same. + mock_decoder->DoDecode(buffer_); + EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_); + EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp); + EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration); + EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty()); + + // Decode a fourth time, with an end of stream buffer. This should + // yield the second frame, and stay in flushing mode. + mock_decoder->DoDecode(end_of_stream_buffer_); + EXPECT_EQ(FFmpegVideoDecoder::kFlushCodec, mock_decoder->state_); + EXPECT_TRUE(kTestPts2.timestamp == mock_decoder->last_pts_.timestamp); + EXPECT_TRUE(kTestPts2.duration == mock_decoder->last_pts_.duration); + EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty()); + + // Decode a fifth time with an end of stream buffer. this should + // yield the third frame. + mock_decoder->DoDecode(end_of_stream_buffer_); + EXPECT_EQ(FFmpegVideoDecoder::kFlushCodec, mock_decoder->state_); + EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp); + EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration); + EXPECT_TRUE(mock_decoder->pts_heap_.IsEmpty()); + + // Decode a sixth time with an end of stream buffer. This should + // Move into kDecodeFinished. + mock_decoder->DoDecode(end_of_stream_buffer_); + EXPECT_EQ(FFmpegVideoDecoder::kDecodeFinished, mock_decoder->state_); + EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp); + EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration); + EXPECT_TRUE(mock_decoder->pts_heap_.IsEmpty()); +} - // Setup decoder to buffer one frame, decode one frame, fail one frame, - // decode one more, and then fail the last one to end decoding. - EXPECT_CALL(*engine_, FillThisBuffer(_)) - .Times(4) - .WillRepeatedly(ReadFromDemux(decoder_.get(), buffer_)); - EXPECT_CALL(*demuxer_.get(), Read(_)) - .Times(6) - .WillOnce(ReturnFromDemux(decoder_.get(), buffer_, kTestPts1)) - .WillOnce(ReturnFromDemux(decoder_.get(), buffer_, kTestPts3)) - .WillOnce(ReturnFromDemux(decoder_.get(), buffer_, kTestPts2)) - .WillOnce(ReturnFromDemux(decoder_.get(), - end_of_stream_buffer_, kTestPts3)) - .WillOnce(ReturnFromDemux(decoder_.get(), - end_of_stream_buffer_, kTestPts3)) - .WillOnce(ReturnFromDemux(decoder_.get(), - end_of_stream_buffer_, kTestPts3)); - EXPECT_CALL(*engine_, EmptyThisBuffer(_)) - .WillOnce(DecodeNotComplete(decoder_.get(), buffer_)) - .WillOnce(DecodeComplete(decoder_.get(), video_frame_, kTestPts1)) - .WillOnce(DecodeNotComplete(decoder_.get(), buffer_)) - .WillOnce(DecodeComplete(decoder_.get(), video_frame_, kTestPts2)) - .WillOnce(DecodeComplete(decoder_.get(), video_frame_, kTestPts3)) - .WillOnce(DecodeNotComplete(decoder_.get(), buffer_)); - EXPECT_CALL(*renderer_.get(), FillThisBufferDone(_)) - .Times(4); - - // First request from renderer: at first round decode engine did not produce - // any frame. Decoder will issue another read from demuxer. at second round - // decode engine will get a valid frame. 
- decoder_->FillThisBuffer(video_frame_); - message_loop_.RunAllPending(); - EXPECT_EQ(FFmpegVideoDecoder::kNormal, decoder_->state_); - ASSERT_TRUE(kTestPts1.timestamp == decoder_->last_pts_.timestamp); - ASSERT_TRUE(kTestPts1.duration == decoder_->last_pts_.duration); - EXPECT_FALSE(decoder_->pts_heap_.IsEmpty()); - - // Second request from renderer: at first round decode engine did not produce - // any frame. Decoder will issue another read from demuxer. at second round - // decode engine will get a valid frame. - decoder_->FillThisBuffer(video_frame_); - message_loop_.RunAllPending(); - EXPECT_EQ(FFmpegVideoDecoder::kFlushCodec, decoder_->state_); - EXPECT_TRUE(kTestPts2.timestamp == decoder_->last_pts_.timestamp); - EXPECT_TRUE(kTestPts2.duration == decoder_->last_pts_.duration); - EXPECT_FALSE(decoder_->pts_heap_.IsEmpty()); - - // Third request from renderer: decode engine will return frame on the - // first round. Input stream had reach EOS, therefore we had entered - // kFlushCodec state after this call. - decoder_->FillThisBuffer(video_frame_); - message_loop_.RunAllPending(); - EXPECT_EQ(FFmpegVideoDecoder::kFlushCodec, decoder_->state_); - EXPECT_TRUE(kTestPts3.timestamp == decoder_->last_pts_.timestamp); - EXPECT_TRUE(kTestPts3.duration == decoder_->last_pts_.duration); - EXPECT_TRUE(decoder_->pts_heap_.IsEmpty()); - - // Fourth request from renderer: Both input/output reach EOF. therefore - // we had reached the kDecodeFinished state after this call. - decoder_->FillThisBuffer(video_frame_); - message_loop_.RunAllPending(); - EXPECT_EQ(FFmpegVideoDecoder::kDecodeFinished, decoder_->state_); - EXPECT_TRUE(kTestPts3.timestamp == decoder_->last_pts_.timestamp); - EXPECT_TRUE(kTestPts3.duration == decoder_->last_pts_.duration); - EXPECT_TRUE(decoder_->pts_heap_.IsEmpty()); +TEST_F(FFmpegVideoDecoderTest, DoDecode_FinishEnqueuesEmptyFrames) { + MockVideoDecodeEngine* mock_engine = new StrictMock<MockVideoDecodeEngine>(); + scoped_refptr<DecoderPrivateMock> mock_decoder = + new StrictMock<DecoderPrivateMock>(mock_engine); + + // Move the decoder into the finished state for this test. + mock_decoder->state_ = FFmpegVideoDecoder::kDecodeFinished; + + // Expect 2 calls, make two calls. If kDecodeFinished is set, the buffer is + // not even examined. + EXPECT_CALL(*mock_decoder, EnqueueEmptyFrame()).Times(3); + EXPECT_CALL(*mock_decoder, OnEmptyBufferDone(_)).Times(3); + + mock_decoder->DoDecode(NULL); + mock_decoder->DoDecode(buffer_); + mock_decoder->DoDecode(end_of_stream_buffer_); + EXPECT_EQ(FFmpegVideoDecoder::kDecodeFinished, mock_decoder->state_); } TEST_F(FFmpegVideoDecoderTest, DoSeek) { @@ -487,47 +484,33 @@ TEST_F(FFmpegVideoDecoderTest, DoSeek) { FFmpegVideoDecoder::kNormal, FFmpegVideoDecoder::kFlushCodec, FFmpegVideoDecoder::kDecodeFinished, - FFmpegVideoDecoder::kStopped, }; - InitializeDecoderSuccessfully(); - for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kStates); ++i) { SCOPED_TRACE(Message() << "Iteration " << i); - // Push in some timestamps. - decoder_->pts_heap_.Push(kTestPts1.timestamp); - decoder_->pts_heap_.Push(kTestPts2.timestamp); - decoder_->pts_heap_.Push(kTestPts3.timestamp); - - decoder_->state_ = kStates[i]; + MockVideoDecodeEngine* mock_engine = + new StrictMock<MockVideoDecodeEngine>(); + scoped_refptr<DecoderPrivateMock> mock_decoder = + new StrictMock<DecoderPrivateMock>(mock_engine); - // Expect a pause. - // TODO(jiesun): call engine's Pause(). 
- StrictMock<MockFilterCallback> pause_done_cb; - EXPECT_CALL(pause_done_cb, OnFilterCallback()); - EXPECT_CALL(pause_done_cb, OnCallbackDestroyed()); - decoder_->Pause(pause_done_cb.NewCallback()); + // Push in some timestamps. + mock_decoder->pts_heap_.Push(kTestPts1.timestamp); + mock_decoder->pts_heap_.Push(kTestPts2.timestamp); + mock_decoder->pts_heap_.Push(kTestPts1.timestamp); // Expect a flush. - EXPECT_CALL(*engine_, Flush(_)) - .WillOnce(WithArg<0>(InvokeRunnable())); - StrictMock<MockFilterCallback> flush_done_cb; - EXPECT_CALL(flush_done_cb, OnFilterCallback()); - EXPECT_CALL(flush_done_cb, OnCallbackDestroyed()); - decoder_->Flush(flush_done_cb.NewCallback()); - - // Expect Seek and verify the results. - EXPECT_CALL(*engine_, Seek(_)) + mock_decoder->state_ = kStates[i]; + EXPECT_CALL(*mock_engine, Flush(_)) .WillOnce(WithArg<0>(InvokeRunnable())); - StrictMock<MockFilterCallback> seek_done_cb; - EXPECT_CALL(seek_done_cb, OnFilterCallback()); - EXPECT_CALL(seek_done_cb, OnCallbackDestroyed()); - decoder_->Seek(kZero, seek_done_cb.NewCallback()); + TaskMocker done_cb; + EXPECT_CALL(done_cb, Run()).Times(1); - EXPECT_TRUE(decoder_->pts_heap_.IsEmpty()); - EXPECT_EQ(FFmpegVideoDecoder::kNormal, decoder_->state_); + // Seek and verify the results. + mock_decoder->DoSeek(kZero, done_cb.CreateTask()); + EXPECT_TRUE(mock_decoder->pts_heap_.IsEmpty()); + EXPECT_EQ(FFmpegVideoDecoder::kNormal, mock_decoder->state_); } } diff --git a/media/filters/omx_video_decode_engine.cc b/media/filters/omx_video_decode_engine.cc index 5e26bfa..db5b402 100644 --- a/media/filters/omx_video_decode_engine.cc +++ b/media/filters/omx_video_decode_engine.cc @@ -255,17 +255,6 @@ void OmxVideoDecodeEngine::PortFlushDone(int port) { ComponentFlushDone(); } -void OmxVideoDecodeEngine::Seek(Task* done_cb) { - message_loop_->PostTask(FROM_HERE, - NewRunnableMethod(this, &OmxVideoDecodeEngine::SeekTask, done_cb)); -} - -void OmxVideoDecodeEngine::SeekTask(Task* done_cb) { - DCHECK_EQ(message_loop_, MessageLoop::current()); - AutoTaskRunner done_runner(done_cb); - // TODO(jiesun): add real logic here. -} - VideoFrame::Format OmxVideoDecodeEngine::GetSurfaceFormat() const { // TODO(jiesun): Both OmxHeaderType and EGLImage surface type could have // different surface formats. 
@@ -682,10 +671,6 @@ void OmxVideoDecodeEngine::FillThisBuffer( } } -bool OmxVideoDecodeEngine::ProvidesBuffer() const { - return !uses_egl_image_; -} - // Reconfigure port void OmxVideoDecodeEngine::OnPortSettingsChangedRun(int port, OMX_INDEXTYPE index) { diff --git a/media/filters/omx_video_decode_engine.h b/media/filters/omx_video_decode_engine.h index db2c461..7c1644f 100644 --- a/media/filters/omx_video_decode_engine.h +++ b/media/filters/omx_video_decode_engine.h @@ -44,11 +44,9 @@ class OmxVideoDecodeEngine : Task* done_cb); virtual void EmptyThisBuffer(scoped_refptr<Buffer> buffer); virtual void FillThisBuffer(scoped_refptr<VideoFrame> video_frame); - virtual bool ProvidesBuffer() const; virtual void Stop(Task* done_cb); virtual void Pause(Task* done_cb); virtual void Flush(Task* done_cb); - virtual void Seek(Task* done_cb); virtual VideoFrame::Format GetSurfaceFormat() const; virtual State state() const; @@ -102,7 +100,6 @@ class OmxVideoDecodeEngine : void StopTask(Task* task); void PauseTask(Task* task); void FlushTask(Task* task); - void SeekTask(Task* task); // Transition method sequence for initialization bool CreateComponent(); diff --git a/media/filters/omx_video_decoder.cc b/media/filters/omx_video_decoder.cc index 3617ba3..f36a305 100644 --- a/media/filters/omx_video_decoder.cc +++ b/media/filters/omx_video_decoder.cc @@ -72,11 +72,6 @@ void OmxVideoDecoder::FillThisBuffer(scoped_refptr<VideoFrame> frame) { &OmxVideoDecodeEngine::FillThisBuffer, frame)); } -bool OmxVideoDecoder::ProvidesBuffer() { - if (!omx_engine_.get()) return false; - return omx_engine_->ProvidesBuffer(); -} - void OmxVideoDecoder::Stop(FilterCallback* callback) { omx_engine_->Stop( NewRunnableMethod(this, @@ -97,19 +92,9 @@ void OmxVideoDecoder::PauseCompleteTask(FilterCallback* callback) { AutoCallbackRunner done_runner(callback); } -void OmxVideoDecoder::Flush(FilterCallback* callback) { - omx_engine_->Flush( - NewRunnableMethod(this, - &OmxVideoDecoder::FlushCompleteTask, callback)); -} - -void OmxVideoDecoder::FlushCompleteTask(FilterCallback* callback) { - AutoCallbackRunner done_runner(callback); -} - void OmxVideoDecoder::Seek(base::TimeDelta time, FilterCallback* callback) { - omx_engine_->Seek( + omx_engine_->Flush( NewRunnableMethod(this, &OmxVideoDecoder::SeekCompleteTask, callback)); } diff --git a/media/filters/omx_video_decoder.h b/media/filters/omx_video_decoder.h index 7699514..5f747ad 100644 --- a/media/filters/omx_video_decoder.h +++ b/media/filters/omx_video_decoder.h @@ -30,10 +30,8 @@ class OmxVideoDecoder : public VideoDecoder { virtual void Initialize(DemuxerStream* stream, FilterCallback* callback); virtual void Stop(FilterCallback* callback); virtual void Pause(FilterCallback* callback); - virtual void Flush(FilterCallback* callback); virtual void Seek(base::TimeDelta time, FilterCallback* callback); virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame); - virtual bool ProvidesBuffer(); virtual const MediaFormat& media_format() { return media_format_; } private: @@ -49,7 +47,6 @@ class OmxVideoDecoder : public VideoDecoder { void StopCompleteTask(FilterCallback* callback); void PauseCompleteTask(FilterCallback* callback); - void FlushCompleteTask(FilterCallback* callback); void SeekCompleteTask(FilterCallback* callback); // TODO(hclam): This is very ugly that we keep reference instead of diff --git a/media/filters/video_decode_engine.h b/media/filters/video_decode_engine.h index 3efe647..8dc9d25 100644 --- a/media/filters/video_decode_engine.h +++ 
b/media/filters/video_decode_engine.h @@ -26,7 +26,6 @@ class VideoDecodeEngine { kCreated, kNormal, kStopped, - kFlushing, kError, }; @@ -66,7 +65,6 @@ class VideoDecodeEngine { virtual void Stop(Task* done_cb) = 0; virtual void Pause(Task* done_cb) = 0; - virtual void Seek(Task* done_cb) = 0; // Flushes the decode engine of any buffered input packets. virtual void Flush(Task* done_cb) = 0; @@ -75,8 +73,6 @@ class VideoDecodeEngine { // DecodeFrame(). virtual VideoFrame::Format GetSurfaceFormat() const = 0; - virtual bool ProvidesBuffer() const = 0; - // Returns the current state of the decode engine. virtual State state() const = 0; }; diff --git a/media/filters/video_renderer_base.cc b/media/filters/video_renderer_base.cc index 166c992..00cf9dc 100644 --- a/media/filters/video_renderer_base.cc +++ b/media/filters/video_renderer_base.cc @@ -4,7 +4,6 @@ #include "base/callback.h" #include "media/base/buffers.h" -#include "media/base/callback.h" #include "media/base/filter_host.h" #include "media/base/video_frame.h" #include "media/filters/video_renderer_base.h" @@ -91,7 +90,7 @@ bool VideoRendererBase::ParseMediaFormat( void VideoRendererBase::Play(FilterCallback* callback) { AutoLock auto_lock(lock_); - DCHECK(kPaused == state_ || kFlushing == state_); + DCHECK_EQ(kPaused, state_); scoped_ptr<FilterCallback> c(callback); state_ = kPlaying; callback->Run(); @@ -100,24 +99,17 @@ void VideoRendererBase::Play(FilterCallback* callback) { void VideoRendererBase::Pause(FilterCallback* callback) { AutoLock auto_lock(lock_); DCHECK(state_ == kPlaying || state_ == kEnded); - AutoCallbackRunner done_runner(callback); + pause_callback_.reset(callback); state_ = kPaused; -} - -void VideoRendererBase::Flush(FilterCallback* callback) { - DCHECK(state_ == kPaused); - - AutoLock auto_lock(lock_); - flush_callback_.reset(callback); - state_ = kFlushing; + // TODO(jiesun): currently we use Pause() to fulfill Flush(). // Filter is considered paused when we've finished all pending reads, which // implies all buffers are returned to owner in Decoder/Renderer. Renderer // is considered paused with one more contingency that |pending_paint_| is // false, such that no client of us is holding any reference to VideoFrame. if (pending_reads_ == 0 && pending_paint_ == false) { - flush_callback_->Run(); - flush_callback_.reset(); + pause_callback_->Run(); + pause_callback_.reset(); FlushBuffers(); } } @@ -155,7 +147,7 @@ void VideoRendererBase::SetPlaybackRate(float playback_rate) { void VideoRendererBase::Seek(base::TimeDelta time, FilterCallback* callback) { AutoLock auto_lock(lock_); - DCHECK(kPaused == state_ || kFlushing == state_); + DCHECK_EQ(kPaused, state_); DCHECK_EQ(0u, pending_reads_) << "Pending reads should have completed"; state_ = kSeeking; seek_callback_.reset(callback); @@ -250,7 +242,8 @@ void VideoRendererBase::ThreadMain() { if (state_ == kStopped) return; - if (state_ != kPlaying || playback_rate_ == 0) { + if (state_ == kPaused || state_ == kSeeking || state_ == kEnded || + playback_rate_ == 0) { remaining_time = kIdleTimeDelta; } else if (frames_queue_ready_.empty() || frames_queue_ready_.front()->IsEndOfStream()) { @@ -357,7 +350,7 @@ void VideoRendererBase::GetCurrentFrame(scoped_refptr<VideoFrame>* frame_out) { // We should have initialized and have the current frame. 
DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying || - state_ == kFlushing || state_ == kEnded); + state_ == kEnded); *frame_out = current_frame_; pending_paint_ = true; } @@ -375,11 +368,11 @@ void VideoRendererBase::PutCurrentFrame(scoped_refptr<VideoFrame> frame) { // frame is timed-out. We will wake up our main thread to advance the current // frame when this is true. frame_available_.Signal(); - if (state_ == kFlushing && pending_reads_ == 0 && flush_callback_.get()) { + if (state_ == kPaused && pending_reads_ == 0 && pause_callback_.get()) { // No more pending reads! We're now officially "paused". FlushBuffers(); - flush_callback_->Run(); - flush_callback_.reset(); + pause_callback_->Run(); + pause_callback_.reset(); } } @@ -394,7 +387,7 @@ void VideoRendererBase::OnFillBufferDone(scoped_refptr<VideoFrame> frame) { } DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying || - state_ == kFlushing || state_ == kEnded); + state_ == kEnded); DCHECK_GT(pending_reads_, 0u); --pending_reads_; @@ -412,7 +405,7 @@ void VideoRendererBase::OnFillBufferDone(scoped_refptr<VideoFrame> frame) { // Check for our preroll complete condition. if (state_ == kSeeking) { DCHECK(seek_callback_.get()); - if (frames_queue_ready_.size() == kMaxFrames || frame->IsEndOfStream()) { + if (frames_queue_ready_.size() == kMaxFrames) { // We're paused, so make sure we update |current_frame_| to represent // our new location. state_ = kPaused; @@ -420,22 +413,18 @@ void VideoRendererBase::OnFillBufferDone(scoped_refptr<VideoFrame> frame) { // Because we might remain paused (i.e., we were not playing before we // received a seek), we can't rely on ThreadMain() to notify the subclass // the frame has been updated. - scoped_refptr<VideoFrame> first_frame; - first_frame = frames_queue_ready_.front(); - if (!first_frame->IsEndOfStream()) { - frames_queue_ready_.pop_front(); - current_frame_ = first_frame; - } + current_frame_ = frames_queue_ready_.front(); + frames_queue_ready_.pop_front(); OnFrameAvailable(); seek_callback_->Run(); seek_callback_.reset(); } - } else if (state_ == kFlushing && pending_reads_ == 0 && !pending_paint_) { + } else if (state_ == kPaused && pending_reads_ == 0 && !pending_paint_) { // No more pending reads! We're now officially "paused". - if (flush_callback_.get()) { - flush_callback_->Run(); - flush_callback_.reset(); + if (pause_callback_.get()) { + pause_callback_->Run(); + pause_callback_.reset(); } } } @@ -461,7 +450,7 @@ void VideoRendererBase::FlushBuffers() { // We should never put EOF frame into "done queue". while (!frames_queue_ready_.empty()) { scoped_refptr<VideoFrame> video_frame = frames_queue_ready_.front(); - if (!video_frame->IsEndOfStream()) { + if (video_frame->IsEndOfStream()) { frames_queue_done_.push_back(video_frame); } frames_queue_ready_.pop_front(); diff --git a/media/filters/video_renderer_base.h b/media/filters/video_renderer_base.h index 1aa42d2..f8176f7 100644 --- a/media/filters/video_renderer_base.h +++ b/media/filters/video_renderer_base.h @@ -46,7 +46,6 @@ class VideoRendererBase : public VideoRenderer, // MediaFilter implementation. 
virtual void Play(FilterCallback* callback); virtual void Pause(FilterCallback* callback); - virtual void Flush(FilterCallback* callback); virtual void Stop(FilterCallback* callback); virtual void SetPlaybackRate(float playback_rate); virtual void Seek(base::TimeDelta time, FilterCallback* callback); @@ -154,7 +153,6 @@ class VideoRendererBase : public VideoRenderer, enum State { kUninitialized, kPaused, - kFlushing, kSeeking, kPlaying, kEnded, @@ -180,7 +178,7 @@ class VideoRendererBase : public VideoRenderer, float playback_rate_; // Filter callbacks. - scoped_ptr<FilterCallback> flush_callback_; + scoped_ptr<FilterCallback> pause_callback_; scoped_ptr<FilterCallback> seek_callback_; base::TimeDelta seek_timestamp_; |