author     jiesun@google.com <jiesun@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2010-05-20 15:58:47 +0000
committer  jiesun@google.com <jiesun@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2010-05-20 15:58:47 +0000
commit     d55b3317839ae53e128b2eb104fd938429ddd330 (patch)
tree       f3a5c5b13139605d7824a18c98329cbad5426ada /media
parent     23d67c409398db702f60bb5c972cddb564432d27 (diff)
Refactoring the engine interface for OpenMAX.

Currently only the FillBufferDone callback and EmptyThisBuffer are used.

Review URL: http://codereview.chromium.org/2095002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@47802 0039d316-1c4b-4281-b951-d872f2087c98
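
For orientation, this change replaces the pull-style DecodeFrame(buffer, video_frame, got_result, done_cb) engine call with an OpenMAX-style pair: the caller pushes compressed data into the engine with EmptyThisBuffer(), and the engine returns decoded output through a FillThisBufferCallback registered at Initialize() time, passing a null frame when nothing was decoded. The code below is a minimal, self-contained illustration of that shape only; it substitutes std::function and std::shared_ptr for Chromium's Callback1 and scoped_refptr, and the Buffer/VideoFrame structs are stand-ins, not the real media classes.

#include <functional>
#include <iostream>
#include <memory>

// Stand-ins for media::Buffer and media::VideoFrame; not the real classes.
struct Buffer { int data_size = 0; };
struct VideoFrame { long timestamp_us = 0; };

// Toy engine with the refactored contract: input is pushed in through
// EmptyThisBuffer(); output (or a null frame when nothing was decoded)
// comes back through the fill callback registered at Initialize().
class ToyDecodeEngine {
 public:
  using FillThisBufferCallback =
      std::function<void(std::shared_ptr<VideoFrame>)>;

  void Initialize(FillThisBufferCallback fill_cb) {
    fill_this_buffer_callback_ = std::move(fill_cb);
  }

  void EmptyThisBuffer(std::shared_ptr<Buffer> buffer) {
    // A real engine decodes here; an empty buffer models "no frame produced".
    std::shared_ptr<VideoFrame> frame;
    if (buffer && buffer->data_size > 0)
      frame = std::make_shared<VideoFrame>();
    // The callback always fires, even with a null frame, so the decoder can
    // decide whether to enqueue output or request more input.
    fill_this_buffer_callback_(frame);
  }

 private:
  FillThisBufferCallback fill_this_buffer_callback_;
};

int main() {
  ToyDecodeEngine engine;
  engine.Initialize([](std::shared_ptr<VideoFrame> frame) {
    std::cout << (frame ? "decoded a frame\n" : "no frame produced\n");
  });

  auto packet = std::make_shared<Buffer>();
  packet->data_size = 1;
  engine.EmptyThisBuffer(packet);                      // prints "decoded a frame"
  engine.EmptyThisBuffer(std::make_shared<Buffer>());  // prints "no frame produced"
}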
Diffstat (limited to 'media')
-rw-r--r--  media/filters/decoder_base.h | 40
-rw-r--r--  media/filters/decoder_base_unittest.cc | 10
-rw-r--r--  media/filters/ffmpeg_audio_decoder.cc | 8
-rw-r--r--  media/filters/ffmpeg_audio_decoder.h | 2
-rw-r--r--  media/filters/ffmpeg_video_decode_engine.cc | 49
-rw-r--r--  media/filters/ffmpeg_video_decode_engine.h | 13
-rw-r--r--  media/filters/ffmpeg_video_decode_engine_unittest.cc | 77
-rw-r--r--  media/filters/omx_video_decode_engine.cc | 75
-rw-r--r--  media/filters/omx_video_decode_engine.h | 27
-rw-r--r--  media/filters/video_decode_engine.h | 39
-rw-r--r--  media/filters/video_decoder_impl.cc | 49
-rw-r--r--  media/filters/video_decoder_impl.h | 9
-rw-r--r--  media/filters/video_decoder_impl_unittest.cc | 92
13 files changed, 257 insertions(+), 233 deletions(-)
diff --git a/media/filters/decoder_base.h b/media/filters/decoder_base.h
index 26b1d18..5642651 100644
--- a/media/filters/decoder_base.h
+++ b/media/filters/decoder_base.h
@@ -110,10 +110,28 @@ class DecoderBase : public Decoder {
// Method that must be implemented by the derived class. If the decode
// operation produces one or more outputs, the derived class should call
// the EnequeueResult() method from within this method.
- virtual void DoDecode(Buffer* input, Task* done_cb) = 0;
+ virtual void DoDecode(Buffer* input) = 0;
MediaFormat media_format_;
+ void OnDecodeComplete() {
+ // Attempt to fulfill a pending read callback and schedule additional reads
+ // if necessary.
+ bool fulfilled = FulfillPendingRead();
+
+ // Issue reads as necessary.
+ //
+ // Note that it's possible for us to decode but not produce a frame, in
+ // which case |pending_reads_| will remain less than |read_queue_| so we
+ // need to schedule an additional read.
+ DCHECK_LE(pending_reads_, read_queue_.size());
+ if (!fulfilled) {
+ DCHECK_LT(pending_reads_, read_queue_.size());
+ demuxer_stream_->Read(NewCallback(this, &DecoderBase::OnReadComplete));
+ ++pending_reads_;
+ }
+ }
+
private:
bool IsStopped() { return state_ == kStopped; }
@@ -233,25 +251,7 @@ class DecoderBase : public Decoder {
}
// Decode the frame right away.
- DoDecode(buffer, NewRunnableMethod(this, &DecoderBase::OnDecodeComplete));
- }
-
- void OnDecodeComplete() {
- // Attempt to fulfill a pending read callback and schedule additional reads
- // if necessary.
- bool fulfilled = FulfillPendingRead();
-
- // Issue reads as necessary.
- //
- // Note that it's possible for us to decode but not produce a frame, in
- // which case |pending_reads_| will remain less than |read_queue_| so we
- // need to schedule an additional read.
- DCHECK_LE(pending_reads_, read_queue_.size());
- if (!fulfilled) {
- DCHECK_LT(pending_reads_, read_queue_.size());
- demuxer_stream_->Read(NewCallback(this, &DecoderBase::OnReadComplete));
- ++pending_reads_;
- }
+ DoDecode(buffer);
}
// Attempts to fulfill a single pending read by dequeuing a buffer and read
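
The hunks above change the DecoderBase contract: DoDecode() no longer receives a completion Task, and OnDecodeComplete() moves from a private callback into a protected method that the derived class must call itself once a decode attempt finishes. Below is a hedged, much-simplified sketch of that calling pattern; the class, the counters, and the synchronous "demuxer read" are illustrative stand-ins for the real DecoderBase/DemuxerStream machinery, not its actual logic.

#include <cstddef>
#include <deque>
#include <functional>
#include <iostream>
#include <string>

// Illustrative stand-ins; not the real media classes.
using Output = std::string;
using ReadCallback = std::function<void(const Output&)>;

class ToyDecoderBase {
 public:
  // A consumer asks for one decoded output.
  void Read(ReadCallback cb) {
    read_queue_.push_back(std::move(cb));
    RequestMoreInput();
  }

 protected:
  virtual ~ToyDecoderBase() = default;

  // After this change the derived class gets only the input and must call
  // OnDecodeComplete() itself when the decode attempt finishes.
  virtual void DoDecode(int input) = 0;

  void EnqueueResult(Output output) { results_.push_back(std::move(output)); }

  void OnDecodeComplete() {
    --pending_reads_;
    // Fulfill a waiting Read() if a result is available.
    bool fulfilled = false;
    if (!results_.empty() && !read_queue_.empty()) {
      ReadCallback cb = std::move(read_queue_.front());
      read_queue_.pop_front();
      cb(results_.front());
      results_.pop_front();
      fulfilled = true;
    }
    // Decode produced nothing: schedule another input read, echoing the
    // |pending_reads_| < |read_queue_| check in the real OnDecodeComplete().
    if (!fulfilled && pending_reads_ < read_queue_.size())
      RequestMoreInput();
  }

 private:
  void RequestMoreInput() {
    ++pending_reads_;
    DoDecode(next_input_++);  // the real code reads from a demuxer instead
  }

  int next_input_ = 0;
  std::size_t pending_reads_ = 0;
  std::deque<ReadCallback> read_queue_;
  std::deque<Output> results_;
};

class ToyDecoder : public ToyDecoderBase {
 protected:
  void DoDecode(int input) override {
    if (input % 2 == 1)  // pretend only odd inputs decode to a frame
      EnqueueResult("frame " + std::to_string(input));
    OnDecodeComplete();  // explicit completion call replaces the old Task
  }
};

int main() {
  ToyDecoder decoder;
  decoder.Read([](const Output& o) { std::cout << "fulfilled: " << o << "\n"; });
}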
diff --git a/media/filters/decoder_base_unittest.cc b/media/filters/decoder_base_unittest.cc
index fc9214c..5468602 100644
--- a/media/filters/decoder_base_unittest.cc
+++ b/media/filters/decoder_base_unittest.cc
@@ -54,7 +54,7 @@ class MockDecoderImpl : public media::DecoderBase<
Task* done_cb));
MOCK_METHOD0(DoStop, void());
MOCK_METHOD2(DoSeek, void(base::TimeDelta time, Task* done_cb));
- MOCK_METHOD2(DoDecode, void(media::Buffer* input, Task* done_cb));
+ MOCK_METHOD1(DoDecode, void(media::Buffer* input));
private:
FRIEND_TEST(media::DecoderBaseTest, FlowControl);
@@ -83,7 +83,7 @@ ACTION(Initialize) {
ACTION_P(SaveDecodeRequest, list) {
scoped_refptr<Buffer> buffer = arg0;
- list->push_back(arg1);
+ list->push_back(buffer);
}
ACTION(CompleteDemuxRequest) {
@@ -121,11 +121,11 @@ TEST(DecoderBaseTest, FlowControl) {
// Read.
StrictMock<MockDecoderReadCallback> read_callback;
- std::vector<Task*> decode_requests;
+ std::vector<scoped_refptr<Buffer> > decode_requests;
EXPECT_CALL(*demuxer_stream, Read(NotNull()))
.Times(2)
.WillRepeatedly(CompleteDemuxRequest());
- EXPECT_CALL(*decoder, DoDecode(NotNull(), NotNull()))
+ EXPECT_CALL(*decoder, DoDecode(NotNull()))
.Times(2)
.WillRepeatedly(SaveDecodeRequest(&decode_requests));
decoder->Read(
@@ -140,7 +140,7 @@ TEST(DecoderBaseTest, FlowControl) {
EXPECT_CALL(read_callback, ReadCallback(NotNull())).Times(2);
for (size_t i = 0; i < decode_requests.size(); ++i) {
decoder->EnqueueResult(new MockDecoderOutput());
- AutoTaskRunner done_cb(decode_requests[i]);
+ decoder->OnDecodeComplete();
}
decode_requests.clear();
message_loop.RunAllPending();
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index 7c3e206f..6d4d512 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -154,9 +154,7 @@ static void ConvertAudioF32ToS32(void* buffer, int buffer_size) {
}
}
-void FFmpegAudioDecoder::DoDecode(Buffer* input, Task* done_cb) {
- AutoTaskRunner done_runner(done_cb);
-
+void FFmpegAudioDecoder::DoDecode(Buffer* input) {
// Due to FFmpeg API changes we no longer have const read-only pointers.
AVPacket packet;
av_init_packet(&packet);
@@ -185,6 +183,7 @@ void FFmpegAudioDecoder::DoDecode(Buffer* input, Task* done_cb) {
<< input->GetDuration().InMicroseconds() << " us"
<< " , packet size: "
<< input->GetDataSize() << " bytes";
+ DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete();
return;
}
@@ -215,6 +214,7 @@ void FFmpegAudioDecoder::DoDecode(Buffer* input, Task* done_cb) {
}
EnqueueResult(result_buffer);
+ DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete();
return;
}
@@ -225,6 +225,7 @@ void FFmpegAudioDecoder::DoDecode(Buffer* input, Task* done_cb) {
input->GetTimestamp() != StreamSample::kInvalidTimestamp &&
input->GetDuration() != StreamSample::kInvalidTimestamp) {
estimated_next_timestamp_ = input->GetTimestamp() + input->GetDuration();
+ DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete();
return;
}
@@ -238,6 +239,7 @@ void FFmpegAudioDecoder::DoDecode(Buffer* input, Task* done_cb) {
result_buffer->SetDuration(input->GetDuration());
EnqueueResult(result_buffer);
}
+ DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete();
}
base::TimeDelta FFmpegAudioDecoder::CalculateDuration(size_t size) {
diff --git a/media/filters/ffmpeg_audio_decoder.h b/media/filters/ffmpeg_audio_decoder.h
index f5ff8ac..d0ad27f 100644
--- a/media/filters/ffmpeg_audio_decoder.h
+++ b/media/filters/ffmpeg_audio_decoder.h
@@ -29,7 +29,7 @@ class FFmpegAudioDecoder : public DecoderBase<AudioDecoder, Buffer> {
virtual void DoSeek(base::TimeDelta time, Task* done_cb);
- virtual void DoDecode(Buffer* input, Task* done_cb);
+ virtual void DoDecode(Buffer* input);
private:
friend class FilterFactoryImpl0<FFmpegAudioDecoder>;
diff --git a/media/filters/ffmpeg_video_decode_engine.cc b/media/filters/ffmpeg_video_decode_engine.cc
index 3313c27..2e18f19 100644
--- a/media/filters/ffmpeg_video_decode_engine.cc
+++ b/media/filters/ffmpeg_video_decode_engine.cc
@@ -24,9 +24,16 @@ FFmpegVideoDecodeEngine::FFmpegVideoDecodeEngine()
FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() {
}
-void FFmpegVideoDecodeEngine::Initialize(AVStream* stream, Task* done_cb) {
+void FFmpegVideoDecodeEngine::Initialize(
+ MessageLoop* message_loop,
+ AVStream* av_stream,
+ EmptyThisBufferCallback* empty_buffer_callback,
+ FillThisBufferCallback* fill_buffer_callback,
+ Task* done_cb) {
AutoTaskRunner done_runner(done_cb);
CHECK(state_ == kCreated);
+ // TODO(jiesun): empty_buffer_callback is not used yet until we had path to re
+ fill_this_buffer_callback_.reset(fill_buffer_callback);
// Always try to use three threads for video decoding. There is little reason
// not to since current day CPUs tend to be multi-core and we measured
@@ -41,6 +48,7 @@ void FFmpegVideoDecodeEngine::Initialize(AVStream* stream, Task* done_cb) {
static const int kMaxDecodeThreads = 16;
+ AVStream* stream = av_stream;
codec_context_ = stream->codec;
codec_context_->flags2 |= CODEC_FLAG2_FAST; // Enable faster H264 decode.
// Enable motion vector search (potentially slow), strong deblocking filter
@@ -102,15 +110,14 @@ static void CopyPlane(size_t plane,
}
}
-// Decodes one frame of video with the given buffer.
-void FFmpegVideoDecodeEngine::DecodeFrame(
- Buffer* buffer,
- scoped_refptr<VideoFrame>* video_frame,
- bool* got_frame,
- Task* done_cb) {
- AutoTaskRunner done_runner(done_cb);
- *got_frame = false;
- *video_frame = NULL;
+void FFmpegVideoDecodeEngine::EmptyThisBuffer(
+ scoped_refptr<Buffer> buffer) {
+ DecodeFrame(buffer);
+}
+
+// Try to decode frame when both input and output are ready.
+void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
+ scoped_refptr<VideoFrame> video_frame;
// Create a packet for input data.
// Due to FFmpeg API changes we no longer have const read-only pointers.
@@ -136,13 +143,13 @@ void FFmpegVideoDecodeEngine::DecodeFrame(
<< buffer->GetDuration().InMicroseconds() << " us"
<< " , packet size: "
<< buffer->GetDataSize() << " bytes";
- *got_frame = false;
+ fill_this_buffer_callback_->Run(video_frame);
return;
}
// If frame_decoded == 0, then no frame was produced.
- *got_frame = frame_decoded != 0;
- if (!*got_frame) {
+ if (frame_decoded == 0) {
+ fill_this_buffer_callback_->Run(video_frame);
return;
}
@@ -153,7 +160,7 @@ void FFmpegVideoDecodeEngine::DecodeFrame(
!av_frame_->data[VideoFrame::kUPlane] ||
!av_frame_->data[VideoFrame::kVPlane]) {
// TODO(jiesun): this is also an error case handled as normal.
- *got_frame = false;
+ fill_this_buffer_callback_->Run(video_frame);
return;
}
@@ -175,10 +182,10 @@ void FFmpegVideoDecodeEngine::DecodeFrame(
codec_context_->height,
timestamp,
duration,
- video_frame);
- if (!video_frame->get()) {
+ &video_frame);
+ if (!video_frame.get()) {
// TODO(jiesun): this is also an error case handled as normal.
- *got_frame = false;
+ fill_this_buffer_callback_->Run(video_frame);
return;
}
@@ -187,9 +194,11 @@ void FFmpegVideoDecodeEngine::DecodeFrame(
// avcodec_decode_video() call.
// TODO(scherkus): figure out pre-allocation/buffer cycling scheme.
// TODO(scherkus): is there a cleaner way to figure out the # of planes?
- CopyPlane(VideoFrame::kYPlane, video_frame->get(), av_frame_.get());
- CopyPlane(VideoFrame::kUPlane, video_frame->get(), av_frame_.get());
- CopyPlane(VideoFrame::kVPlane, video_frame->get(), av_frame_.get());
+ CopyPlane(VideoFrame::kYPlane, video_frame.get(), av_frame_.get());
+ CopyPlane(VideoFrame::kUPlane, video_frame.get(), av_frame_.get());
+ CopyPlane(VideoFrame::kVPlane, video_frame.get(), av_frame_.get());
+
+ fill_this_buffer_callback_->Run(video_frame);
}
void FFmpegVideoDecodeEngine::Flush(Task* done_cb) {
diff --git a/media/filters/ffmpeg_video_decode_engine.h b/media/filters/ffmpeg_video_decode_engine.h
index afafd58..bead1fc 100644
--- a/media/filters/ffmpeg_video_decode_engine.h
+++ b/media/filters/ffmpeg_video_decode_engine.h
@@ -25,10 +25,12 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
virtual ~FFmpegVideoDecodeEngine();
// Implementation of the VideoDecodeEngine Interface.
- virtual void Initialize(AVStream* stream, Task* done_cb);
- virtual void DecodeFrame(Buffer* buffer,
- scoped_refptr<VideoFrame>* video_frame,
- bool* got_result, Task* done_cb);
+ virtual void Initialize(MessageLoop* message_loop,
+ AVStream* av_stream,
+ EmptyThisBufferCallback* empty_buffer_callback,
+ FillThisBufferCallback* fill_buffer_callback,
+ Task* done_cb);
+ virtual void EmptyThisBuffer(scoped_refptr<Buffer> buffer);
virtual void Flush(Task* done_cb);
virtual VideoFrame::Format GetSurfaceFormat() const;
@@ -41,9 +43,12 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
}
private:
+ void DecodeFrame(scoped_refptr<Buffer> buffer);
+
AVCodecContext* codec_context_;
State state_;
scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> av_frame_;
+ scoped_ptr<FillThisBufferCallback> fill_this_buffer_callback_;
DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecodeEngine);
};
diff --git a/media/filters/ffmpeg_video_decode_engine_unittest.cc b/media/filters/ffmpeg_video_decode_engine_unittest.cc
index 4d16a7e..12051c1 100644
--- a/media/filters/ffmpeg_video_decode_engine_unittest.cc
+++ b/media/filters/ffmpeg_video_decode_engine_unittest.cc
@@ -74,10 +74,21 @@ class FFmpegVideoDecodeEngineTest : public testing::Test {
TaskMocker done_cb;
EXPECT_CALL(done_cb, Run());
- test_engine_->Initialize(&stream_, done_cb.CreateTask());
+ test_engine_->Initialize(
+ MessageLoop::current(),
+ &stream_,
+ NULL,
+ NewCallback(this, &FFmpegVideoDecodeEngineTest::OnDecodeComplete),
+ done_cb.CreateTask());
EXPECT_EQ(VideoDecodeEngine::kNormal, test_engine_->state());
}
+ public:
+ MOCK_METHOD1(OnDecodeComplete,
+ void(scoped_refptr<VideoFrame> video_frame));
+
+ scoped_refptr<VideoFrame> video_frame_;
+ protected:
scoped_ptr<FFmpegVideoDecodeEngine> test_engine_;
scoped_array<uint8_t> frame_buffer_;
StrictMock<MockFFmpeg> mock_ffmpeg_;
@@ -111,7 +122,12 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_FindDecoderFails) {
TaskMocker done_cb;
EXPECT_CALL(done_cb, Run());
- test_engine_->Initialize(&stream_, done_cb.CreateTask());
+ test_engine_->Initialize(
+ MessageLoop::current(),
+ &stream_,
+ NULL,
+ NULL,
+ done_cb.CreateTask());
EXPECT_EQ(VideoDecodeEngine::kError, test_engine_->state());
}
@@ -129,7 +145,12 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_InitThreadFails) {
TaskMocker done_cb;
EXPECT_CALL(done_cb, Run());
- test_engine_->Initialize(&stream_, done_cb.CreateTask());
+ test_engine_->Initialize(
+ MessageLoop::current(),
+ &stream_,
+ NULL,
+ NULL,
+ done_cb.CreateTask());
EXPECT_EQ(VideoDecodeEngine::kError, test_engine_->state());
}
@@ -149,10 +170,19 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) {
TaskMocker done_cb;
EXPECT_CALL(done_cb, Run());
- test_engine_->Initialize(&stream_, done_cb.CreateTask());
+ test_engine_->Initialize(
+ MessageLoop::current(),
+ &stream_,
+ NULL,
+ NULL,
+ done_cb.CreateTask());
EXPECT_EQ(VideoDecodeEngine::kError, test_engine_->state());
}
+ACTION_P(DecodeComplete, decoder) {
+ decoder->video_frame_ = arg0;
+}
+
TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_Normal) {
Initialize();
@@ -170,18 +200,13 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_Normal) {
.WillOnce(DoAll(SetArgumentPointee<2>(1), // Simulate 1 byte frame.
Return(0)));
- TaskMocker done_cb;
- EXPECT_CALL(done_cb, Run());
-
- bool got_result;
- scoped_refptr<VideoFrame> video_frame;
- test_engine_->DecodeFrame(buffer_, &video_frame, &got_result,
- done_cb.CreateTask());
- EXPECT_TRUE(got_result);
+ EXPECT_CALL(*this, OnDecodeComplete(_))
+ .WillOnce(DecodeComplete(this));
+ test_engine_->EmptyThisBuffer(buffer_);
EXPECT_EQ(kTimestamp.InMicroseconds(),
- video_frame->GetTimestamp().ToInternalValue());
+ video_frame_->GetTimestamp().ToInternalValue());
EXPECT_EQ(kDuration.ToInternalValue(),
- video_frame->GetDuration().ToInternalValue());
+ video_frame_->GetDuration().ToInternalValue());
}
TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_0ByteFrame) {
@@ -194,14 +219,10 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_0ByteFrame) {
.WillOnce(DoAll(SetArgumentPointee<2>(0), // Simulate 0 byte frame.
Return(0)));
- TaskMocker done_cb;
- EXPECT_CALL(done_cb, Run());
-
- bool got_result;
- scoped_refptr<VideoFrame> video_frame;
- test_engine_->DecodeFrame(buffer_, &video_frame, &got_result,
- done_cb.CreateTask());
- EXPECT_FALSE(got_result);
+ EXPECT_CALL(*this, OnDecodeComplete(_))
+ .WillOnce(DecodeComplete(this));
+ test_engine_->EmptyThisBuffer(buffer_);
+ EXPECT_FALSE(video_frame_.get());
}
TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_DecodeError) {
@@ -213,14 +234,10 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_DecodeError) {
AVCodecDecodeVideo2(&codec_context_, &yuv_frame_, _, _))
.WillOnce(Return(-1));
- TaskMocker done_cb;
- EXPECT_CALL(done_cb, Run());
-
- bool got_result;
- scoped_refptr<VideoFrame> video_frame;
- test_engine_->DecodeFrame(buffer_, &video_frame, &got_result,
- done_cb.CreateTask());
- EXPECT_FALSE(got_result);
+ EXPECT_CALL(*this, OnDecodeComplete(_))
+ .WillOnce(DecodeComplete(this));
+ test_engine_->EmptyThisBuffer(buffer_);
+ EXPECT_FALSE(video_frame_.get());
}
TEST_F(FFmpegVideoDecodeEngineTest, GetSurfaceFormat) {
diff --git a/media/filters/omx_video_decode_engine.cc b/media/filters/omx_video_decode_engine.cc
index b8794f3..6d669f7 100644
--- a/media/filters/omx_video_decode_engine.cc
+++ b/media/filters/omx_video_decode_engine.cc
@@ -35,12 +35,19 @@ OmxVideoDecodeEngine::OmxVideoDecodeEngine()
OmxVideoDecodeEngine::~OmxVideoDecodeEngine() {
}
-void OmxVideoDecodeEngine::Initialize(AVStream* stream, Task* done_cb) {
+void OmxVideoDecodeEngine::Initialize(
+ MessageLoop* message_loop,
+ AVStream* av_stream,
+ EmptyThisBufferCallback* empty_buffer_callback,
+ FillThisBufferCallback* fill_buffer_callback,
+ Task* done_cb) {
DCHECK_EQ(message_loop_, MessageLoop::current());
+ fill_this_buffer_callback_.reset(fill_buffer_callback);
AutoTaskRunner done_runner(done_cb);
- omx_codec_ = new media::OmxCodec(message_loop_);
+ omx_codec_ = new media::OmxCodec(message_loop);
+ AVStream* stream = av_stream;
width_ = stream->codec->width;
height_ = stream->codec->height;
@@ -75,26 +82,8 @@ void OmxVideoDecodeEngine::OnHardwareError() {
state_ = kError;
}
-// This method assumes the input buffer contains exactly one frame or is an
-// end-of-stream buffer. The logic of this method and in OnReadComplete() is
-// based on this assumation.
-//
-// For every input buffer received here, we submit one read request to the
-// decoder. So when a read complete callback is received, a corresponding
-// decode request must exist.
-void OmxVideoDecodeEngine::DecodeFrame(Buffer* buffer,
- scoped_refptr<VideoFrame>* video_frame,
- bool* got_result,
- Task* done_cb) {
- DCHECK_EQ(message_loop_, MessageLoop::current());
- if (state_ != kNormal) {
- *got_result = false;
- *video_frame = NULL;
- done_cb->Run();
- delete done_cb;
- return;
- }
-
+void OmxVideoDecodeEngine::EmptyThisBuffer(scoped_refptr<Buffer> buffer) {
+ if ( state_ != kNormal ) return; // discard the buffer.
if (!has_fed_on_eos_) {
omx_codec_->Feed(buffer,
NewCallback(this, &OmxVideoDecodeEngine::OnFeedDone));
@@ -103,10 +92,20 @@ void OmxVideoDecodeEngine::DecodeFrame(Buffer* buffer,
has_fed_on_eos_ = true;
}
}
+ DecodeFrame();
+}
- // Enqueue the decode request and the associated buffer.
- decode_request_queue_.push_back(
- DecodeRequest(video_frame, got_result, done_cb));
+// This method assumes the input buffer contains exactly one frame or is an
+// end-of-stream buffer. The logic of this method and in OnReadComplete() is
+// based on this assumation.
+//
+// For every input buffer received here, we submit one read request to the
+// decoder. So when a read complete callback is received, a corresponding
+// decode request must exist.
+void OmxVideoDecodeEngine::DecodeFrame() {
+ DCHECK_EQ(message_loop_, MessageLoop::current());
+ if (state_ != kNormal)
+ return;
// Submit a read request to the decoder.
omx_codec_->Read(NewCallback(this, &OmxVideoDecodeEngine::OnReadComplete));
@@ -138,41 +137,31 @@ void OmxVideoDecodeEngine::OnReadComplete(
DCHECK_EQ(message_loop_, MessageLoop::current());
DCHECK_EQ(buffer->nFilledLen, width_*height_*3/2 );
- // We assume that when we receive a read complete callback from
- // OmxCodec there was a read request made.
- CHECK(!decode_request_queue_.empty());
- DecodeRequest request = decode_request_queue_.front();
- decode_request_queue_.pop_front();
- *request.got_result = false;
-
- // Notify the read request to this object has been fulfilled.
- AutoTaskRunner done_cb_runner(request.done_cb);
-
if (!buffer) // EOF signal by OmxCodec
return;
+ scoped_refptr<VideoFrame> frame;
VideoFrame::CreateFrame(GetSurfaceFormat(),
width_, height_,
StreamSample::kInvalidTimestamp,
StreamSample::kInvalidTimestamp,
- request.frame);
- if (!request.frame->get()) {
+ &frame);
+ if (!frame.get()) {
// TODO(jiesun): this is also an error case handled as normal.
return;
}
- // Now we had everything ready.
- *request.got_result = true;
-
// TODO(jiesun): Assume YUV 420 format.
// TODO(jiesun): We will use VideoFrame to wrap OMX_BUFFERHEADTYPE.
const int pixels = width_ * height_;
- memcpy((*request.frame)->data(VideoFrame::kYPlane), buffer->pBuffer, pixels);
- memcpy((*request.frame)->data(VideoFrame::kUPlane), buffer->pBuffer + pixels,
+ memcpy(frame->data(VideoFrame::kYPlane), buffer->pBuffer, pixels);
+ memcpy(frame->data(VideoFrame::kUPlane), buffer->pBuffer + pixels,
pixels / 4);
- memcpy((*request.frame)->data(VideoFrame::kVPlane),
+ memcpy(frame->data(VideoFrame::kVPlane),
buffer->pBuffer + pixels + pixels /4,
pixels / 4);
+
+ fill_this_buffer_callback_->Run(frame);
}
} // namespace media
diff --git a/media/filters/omx_video_decode_engine.h b/media/filters/omx_video_decode_engine.h
index 756ccc1..f245c03 100644
--- a/media/filters/omx_video_decode_engine.h
+++ b/media/filters/omx_video_decode_engine.h
@@ -31,10 +31,12 @@ class OmxVideoDecodeEngine : public VideoDecodeEngine {
virtual ~OmxVideoDecodeEngine();
// Implementation of the VideoDecodeEngine Interface.
- virtual void Initialize(AVStream* stream, Task* done_cb);
- virtual void DecodeFrame(Buffer* buffer,
- scoped_refptr<VideoFrame>* video_frame,
- bool* got_result, Task* done_cb);
+ virtual void Initialize(MessageLoop* message_loop,
+ AVStream* av_stream,
+ EmptyThisBufferCallback* empty_buffer_callback,
+ FillThisBufferCallback* fill_buffer_callback,
+ Task* done_cb);
+ virtual void EmptyThisBuffer(scoped_refptr<Buffer> buffer);
virtual void Flush(Task* done_cb);
virtual VideoFrame::Format GetSurfaceFormat() const;
@@ -51,20 +53,7 @@ class OmxVideoDecodeEngine : public VideoDecodeEngine {
};
private:
- // A struct to hold parameters of a decode request. Objects pointed by
- // these parameters are owned by the caller.
- struct DecodeRequest {
- DecodeRequest(scoped_refptr<VideoFrame>* f, bool* b, Task* cb)
- : frame(f),
- got_result(b),
- done_cb(cb) {
- }
-
- scoped_refptr<VideoFrame>* frame;
- bool* got_result;
- Task* done_cb;
- };
-
+ void DecodeFrame();
virtual void OnFeedDone(Buffer* buffer);
virtual void OnHardwareError();
virtual void OnReadComplete(OMX_BUFFERHEADERTYPE* buffer);
@@ -80,11 +69,11 @@ class OmxVideoDecodeEngine : public VideoDecodeEngine {
bool has_fed_on_eos_; // Used to avoid sending an end of stream to
// OpenMax twice since OpenMax does not always
// handle this nicely.
- std::list<DecodeRequest> decode_request_queue_;
scoped_refptr<media::OmxCodec> omx_codec_;
scoped_ptr<media::OmxConfigurator> omx_configurator_;
MessageLoop* message_loop_;
+ scoped_ptr<FillThisBufferCallback> fill_this_buffer_callback_;
DISALLOW_COPY_AND_ASSIGN(OmxVideoDecodeEngine);
};
diff --git a/media/filters/video_decode_engine.h b/media/filters/video_decode_engine.h
index 975ecea..2cc767a 100644
--- a/media/filters/video_decode_engine.h
+++ b/media/filters/video_decode_engine.h
@@ -5,12 +5,13 @@
#ifndef MEDIA_FILTERS_VIDEO_DECODE_ENGINE_H_
#define MEDIA_FILTERS_VIDEO_DECODE_ENGINE_H_
+#include "base/callback.h"
+#include "base/message_loop.h"
#include "media/base/video_frame.h"
// FFmpeg types.
//
// TODO(ajwong): Try to cut the dependency on the FFmpeg types.
-struct AVFrame;
struct AVStream;
class Task;
@@ -31,18 +32,34 @@ class VideoDecodeEngine {
VideoDecodeEngine() {}
virtual ~VideoDecodeEngine() {}
+ typedef Callback1<scoped_refptr<Buffer> >::Type EmptyThisBufferCallback;
+ typedef Callback1<scoped_refptr<VideoFrame> >::Type FillThisBufferCallback;
+
// Initialized the engine. On successful Initialization, state() should
// return kNormal.
- virtual void Initialize(AVStream* stream, Task* done_cb) = 0;
-
- // Decodes one frame of video with the given buffer. Returns false if there
- // was a decode error, or a zero-byte frame was produced.
- //
- // TODO(ajwong): Should this function just allocate a new yuv_frame and return
- // it via a "GetNextFrame()" method or similar?
- virtual void DecodeFrame(Buffer* buffer,
- scoped_refptr<VideoFrame>* video_frame,
- bool* got_result, Task* done_cb) = 0;
+ virtual void Initialize(MessageLoop* message_loop,
+ AVStream* av_stream,
+ EmptyThisBufferCallback* empty_buffer_callback,
+ FillThisBufferCallback* fill_buffer_callback,
+ Task* done_cb) = 0;
+
+ // These functions and callbacks could be used in two scenarios for both
+ // input and output streams:
+ // 1. Engine provide buffers.
+ // 2. Outside party provide buffers.
+ // The currently planned engine implementation:
+ // 1. provides the input buffer request inside engine through
+ // |EmptyThisBufferCallback|. The engine implementation has better knowledge
+ // of the decoder reordering delay and jittery removal requirements. Input
+ // buffers are returned into engine through |EmptyThisBuffer|.
+ // 2. Output buffers are provided from outside the engine, and feed into
+ // engine through |FillThisBuffer|. Output buffers are returned to outside
+ // by |FillThisBufferCallback|.
+
+ virtual void EmptyThisBuffer(scoped_refptr<Buffer> buffer) = 0;
+ virtual void FillThisBuffer(scoped_refptr<VideoFrame> frame) {
+ NOTREACHED();
+ };
// Flushes the decode engine of any buffered input packets.
virtual void Flush(Task* done_cb) = 0;
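
The comment block added above describes the intended asymmetry of the new interface: compressed input buffers are handed back to the engine through EmptyThisBuffer(), while decoded output is returned to the caller through the FillThisBufferCallback; FillThisBuffer() itself is only a placeholder in this change (its default body hits NOTREACHED()). A compact distillation of that interface shape follows, again using std::function and std::shared_ptr in place of Chromium's Callback1 and scoped_refptr, with stand-in Buffer/VideoFrame types.

#include <cassert>
#include <functional>
#include <memory>

struct Buffer {};      // stand-in for media::Buffer
struct VideoFrame {};  // stand-in for media::VideoFrame

// Distilled shape of the refactored engine interface described above.
class VideoDecodeEngineShape {
 public:
  using EmptyThisBufferCallback = std::function<void(std::shared_ptr<Buffer>)>;
  using FillThisBufferCallback = std::function<void(std::shared_ptr<VideoFrame>)>;

  virtual ~VideoDecodeEngineShape() = default;

  // The engine keeps the two callbacks handed to it at initialization;
  // only the fill callback is exercised by this change.
  virtual void Initialize(EmptyThisBufferCallback empty_buffer_callback,
                          FillThisBufferCallback fill_buffer_callback) = 0;

  // Input path: the caller pushes compressed buffers into the engine.
  virtual void EmptyThisBuffer(std::shared_ptr<Buffer> buffer) = 0;

  // Output path: the caller would push empty output frames into the engine.
  // No engine implements this yet, mirroring the NOTREACHED() default above.
  virtual void FillThisBuffer(std::shared_ptr<VideoFrame> /*frame*/) {
    assert(false && "FillThisBuffer not implemented in this change");
  }
};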
diff --git a/media/filters/video_decoder_impl.cc b/media/filters/video_decoder_impl.cc
index 38f36dc..ca50e4c 100644
--- a/media/filters/video_decoder_impl.cc
+++ b/media/filters/video_decoder_impl.cc
@@ -59,7 +59,10 @@ void VideoDecoderImpl::DoInitialize(DemuxerStream* demuxer_stream,
media_format_.SetAsInteger(MediaFormat::kHeight, height_);
decode_engine_->Initialize(
+ message_loop(),
av_stream,
+ NULL,
+ NewCallback(this, &VideoDecoderImpl::OnDecodeComplete),
NewRunnableMethod(this,
&VideoDecoderImpl::OnInitializeComplete,
success,
@@ -87,9 +90,7 @@ void VideoDecoderImpl::DoSeek(base::TimeDelta time, Task* done_cb) {
decode_engine_->Flush(done_cb);
}
-void VideoDecoderImpl::DoDecode(Buffer* buffer, Task* done_cb) {
- AutoTaskRunner done_runner(done_cb);
-
+void VideoDecoderImpl::DoDecode(Buffer* buffer) {
// TODO(ajwong): This DoDecode() and OnDecodeComplete() set of functions is
// too complicated to easily unittest. The test becomes fragile. Try to
// find a way to reorganize into smaller units for testing.
@@ -122,6 +123,7 @@ void VideoDecoderImpl::DoDecode(Buffer* buffer, Task* done_cb) {
// If the decoding is finished, we just always return empty frames.
if (state_ == kDecodeFinished) {
EnqueueEmptyFrame();
+ OnEmptyBufferDone();
return;
}
@@ -141,31 +143,14 @@ void VideoDecoderImpl::DoDecode(Buffer* buffer, Task* done_cb) {
}
// Otherwise, attempt to decode a single frame.
- bool* got_frame = new bool;
- scoped_refptr<VideoFrame>* video_frame = new scoped_refptr<VideoFrame>(NULL);
- decode_engine_->DecodeFrame(
- buffer,
- video_frame,
- got_frame,
- NewRunnableMethod(this,
- &VideoDecoderImpl::OnDecodeComplete,
- video_frame,
- got_frame,
- done_runner.release()));
+ decode_engine_->EmptyThisBuffer(buffer);
}
-void VideoDecoderImpl::OnDecodeComplete(scoped_refptr<VideoFrame>* video_frame,
- bool* got_frame, Task* done_cb) {
- // Note: The |done_runner| must be declared *last* to ensure proper
- // destruction order.
- scoped_ptr<bool> got_frame_deleter(got_frame);
- scoped_ptr<scoped_refptr<VideoFrame> > video_frame_deleter(video_frame);
- AutoTaskRunner done_runner(done_cb);
-
- // If we actually got data back, enqueue a frame.
- if (*got_frame) {
+void VideoDecoderImpl::OnDecodeComplete(scoped_refptr<VideoFrame> video_frame) {
+ if (video_frame.get()) {
+ // If we actually got data back, enqueue a frame.
last_pts_ = FindPtsAndDuration(*time_base_, pts_heap_, last_pts_,
- video_frame->get());
+ video_frame.get());
// Pop off a pts on a successful decode since we are "using up" one
// timestamp.
@@ -181,9 +166,9 @@ void VideoDecoderImpl::OnDecodeComplete(scoped_refptr<VideoFrame>* video_frame,
NOTREACHED() << "Attempting to decode more frames than were input.";
}
- (*video_frame)->SetTimestamp(last_pts_.timestamp);
- (*video_frame)->SetDuration(last_pts_.duration);
- EnqueueVideoFrame(*video_frame);
+ video_frame->SetTimestamp(last_pts_.timestamp);
+ video_frame->SetDuration(last_pts_.duration);
+ EnqueueVideoFrame(video_frame);
} else {
// When in kFlushCodec, any errored decode, or a 0-lengthed frame,
// is taken as a signal to stop decoding.
@@ -192,6 +177,14 @@ void VideoDecoderImpl::OnDecodeComplete(scoped_refptr<VideoFrame>* video_frame,
EnqueueEmptyFrame();
}
}
+
+ OnEmptyBufferDone();
+}
+
+void VideoDecoderImpl::OnEmptyBufferDone() {
+ // TODO(jiesun): what |DecodeBase::OnDecodeComplete| done is just
+ // what should be done in EmptyThisBufferCallback.
+ DecoderBase<VideoDecoder, VideoFrame>::OnDecodeComplete();
}
void VideoDecoderImpl::EnqueueVideoFrame(
diff --git a/media/filters/video_decoder_impl.h b/media/filters/video_decoder_impl.h
index e63f961..6d47b3d 100644
--- a/media/filters/video_decoder_impl.h
+++ b/media/filters/video_decoder_impl.h
@@ -34,7 +34,10 @@ class VideoDecoderImpl : public DecoderBase<VideoDecoder, VideoFrame> {
virtual void DoInitialize(DemuxerStream* demuxer_stream, bool* success,
Task* done_cb);
virtual void DoSeek(base::TimeDelta time, Task* done_cb);
- virtual void DoDecode(Buffer* buffer, Task* done_cb);
+ virtual void DoDecode(Buffer* input);
+
+ protected:
+ virtual void OnEmptyBufferDone();
private:
friend class FilterFactoryImpl1<VideoDecoderImpl, VideoDecodeEngine*>;
@@ -68,8 +71,8 @@ class VideoDecoderImpl : public DecoderBase<VideoDecoder, VideoFrame> {
// Methods that pickup after the decode engine has finished its action.
virtual void OnInitializeComplete(bool* success /* Not owned */,
Task* done_cb);
- virtual void OnDecodeComplete(scoped_refptr<VideoFrame>* video_frame,
- bool* got_frame, Task* done_cb);
+
+ virtual void OnDecodeComplete(scoped_refptr<VideoFrame> video_frame);
// Attempt to get the PTS and Duration for this frame by examining the time
// info provided via packet stream (stored in |pts_heap|), or the info
diff --git a/media/filters/video_decoder_impl_unittest.cc b/media/filters/video_decoder_impl_unittest.cc
index 5bef51c..174b185 100644
--- a/media/filters/video_decoder_impl_unittest.cc
+++ b/media/filters/video_decoder_impl_unittest.cc
@@ -29,6 +29,7 @@ using ::testing::ReturnNull;
using ::testing::SetArgumentPointee;
using ::testing::StrictMock;
using ::testing::WithArg;
+using ::testing::Invoke;
namespace media {
@@ -49,10 +50,13 @@ class MockFFmpegDemuxerStream : public MockDemuxerStream,
class MockVideoDecodeEngine : public VideoDecodeEngine {
public:
- MOCK_METHOD2(Initialize, void(AVStream* stream, Task* done_cb));
- MOCK_METHOD4(DecodeFrame, void(Buffer* buffer,
- scoped_refptr<VideoFrame>* video_frame,
- bool* got_result, Task* done_cb));
+ MOCK_METHOD5(Initialize, void(MessageLoop* message_loop,
+ AVStream* stream,
+ EmptyThisBufferCallback* empty_buffer_callback,
+ FillThisBufferCallback* fill_buffer_callback,
+ Task* done_cb));
+ MOCK_METHOD1(EmptyThisBuffer, void(scoped_refptr<Buffer> buffer));
+ MOCK_METHOD1(FillThisBuffer, void(scoped_refptr<VideoFrame> buffer));
MOCK_METHOD1(Flush, void(Task* done_cb));
MOCK_CONST_METHOD0(state, State());
MOCK_CONST_METHOD0(GetSurfaceFormat, VideoFrame::Format());
@@ -68,15 +72,18 @@ class DecoderPrivateMock : public VideoDecoderImpl {
MOCK_METHOD1(EnqueueVideoFrame,
void(const scoped_refptr<VideoFrame>& video_frame));
MOCK_METHOD0(EnqueueEmptyFrame, void());
- MOCK_METHOD3(CopyPlane, void(size_t plane,
- const scoped_refptr<VideoFrame> video_frame,
- const AVFrame* frame));
MOCK_METHOD4(FindPtsAndDuration, TimeTuple(
const AVRational& time_base,
const PtsHeap& pts_heap,
const TimeTuple& last_pts,
const VideoFrame* frame));
MOCK_METHOD0(SignalPipelineError, void());
+ MOCK_METHOD0(OnEmptyBufferDone, void());
+
+ // change access qualifier for test: used in actions.
+ void OnDecodeComplete(scoped_refptr<VideoFrame> video_frame) {
+ VideoDecoderImpl::OnDecodeComplete(video_frame);
+ }
};
// Fixture class to facilitate writing tests. Takes care of setting up the
@@ -207,8 +214,8 @@ TEST_F(VideoDecoderImplTest, Initialize_EngineFails) {
EXPECT_CALL(*demuxer_, GetAVStream())
.WillOnce(Return(&stream_));
- EXPECT_CALL(*engine_, Initialize(_, _))
- .WillOnce(WithArg<1>(InvokeRunnable()));
+ EXPECT_CALL(*engine_, Initialize(_, _, _, _, _))
+ .WillOnce(WithArg<4>(InvokeRunnable()));
EXPECT_CALL(*engine_, state())
.WillOnce(Return(VideoDecodeEngine::kError));
@@ -229,8 +236,8 @@ TEST_F(VideoDecoderImplTest, Initialize_Successful) {
EXPECT_CALL(*demuxer_, GetAVStream())
.WillOnce(Return(&stream_));
- EXPECT_CALL(*engine_, Initialize(_, _))
- .WillOnce(WithArg<1>(InvokeRunnable()));
+ EXPECT_CALL(*engine_, Initialize(_, _, _, _, _))
+ .WillOnce(WithArg<4>(InvokeRunnable()));
EXPECT_CALL(*engine_, state())
.WillOnce(Return(VideoDecodeEngine::kNormal));
@@ -326,6 +333,15 @@ TEST_F(VideoDecoderImplTest, FindPtsAndDuration) {
EXPECT_EQ(789, result_pts.duration.InMicroseconds());
}
+ACTION_P2(DecodeComplete, decoder, video_frame) {
+ decoder->OnDecodeComplete(video_frame);
+}
+
+ACTION_P2(DecodeNotComplete, decoder, video_frame) {
+ scoped_refptr<VideoFrame> null_frame;
+ decoder->OnDecodeComplete(null_frame);
+}
+
TEST_F(VideoDecoderImplTest, DoDecode_TestStateTransition) {
// Simulates a input sequence of three buffers, and six decode requests to
// exercise the state transitions, and bookkeeping logic of DoDecode.
@@ -343,22 +359,13 @@ TEST_F(VideoDecoderImplTest, DoDecode_TestStateTransition) {
// Setup decoder to buffer one frame, decode one frame, fail one frame,
// decode one more, and then fail the last one to end decoding.
- EXPECT_CALL(*mock_engine, DecodeFrame(_, _, _,_))
- .WillOnce(DoAll(SetArgumentPointee<2>(false),
- WithArg<3>(InvokeRunnable())))
- .WillOnce(DoAll(SetArgumentPointee<1>(video_frame_),
- SetArgumentPointee<2>(true),
- WithArg<3>(InvokeRunnable())))
- .WillOnce(DoAll(SetArgumentPointee<2>(false),
- WithArg<3>(InvokeRunnable())))
- .WillOnce(DoAll(SetArgumentPointee<1>(video_frame_),
- SetArgumentPointee<2>(true),
- WithArg<3>(InvokeRunnable())))
- .WillOnce(DoAll(SetArgumentPointee<1>(video_frame_),
- SetArgumentPointee<2>(true),
- WithArg<3>(InvokeRunnable())))
- .WillOnce(DoAll(SetArgumentPointee<2>(false),
- WithArg<3>(InvokeRunnable())));
+ EXPECT_CALL(*mock_engine, EmptyThisBuffer(_))
+ .WillOnce(DecodeNotComplete(mock_decoder.get(), video_frame_))
+ .WillOnce(DecodeComplete(mock_decoder.get(), video_frame_))
+ .WillOnce(DecodeNotComplete(mock_decoder.get(), video_frame_))
+ .WillOnce(DecodeComplete(mock_decoder.get(), video_frame_))
+ .WillOnce(DecodeComplete(mock_decoder.get(), video_frame_))
+ .WillOnce(DecodeNotComplete(mock_decoder.get(), video_frame_));
EXPECT_CALL(*mock_decoder, FindPtsAndDuration(_, _, _, _))
.WillOnce(Return(kTestPts1))
.WillOnce(Return(kTestPts2))
@@ -367,25 +374,22 @@ TEST_F(VideoDecoderImplTest, DoDecode_TestStateTransition) {
.Times(3);
EXPECT_CALL(*mock_decoder, EnqueueEmptyFrame())
.Times(1);
-
- // Setup callbacks to be executed 6 times.
- TaskMocker done_cb;
- EXPECT_CALL(done_cb, Run()).Times(6);
+ EXPECT_CALL(*mock_decoder, OnEmptyBufferDone())
+ .Times(6);
// Setup initial state and check that it is sane.
ASSERT_EQ(VideoDecoderImpl::kNormal, mock_decoder->state_);
ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.timestamp);
ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.duration);
-
// Decode once, which should simulate a buffering call.
- mock_decoder->DoDecode(buffer_, done_cb.CreateTask());
+ mock_decoder->DoDecode(buffer_);
EXPECT_EQ(VideoDecoderImpl::kNormal, mock_decoder->state_);
ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.timestamp);
ASSERT_TRUE(base::TimeDelta() == mock_decoder->last_pts_.duration);
EXPECT_FALSE(mock_decoder->pts_heap_.IsEmpty());
// Decode a second time, which should yield the first frame.
- mock_decoder->DoDecode(buffer_, done_cb.CreateTask());
+ mock_decoder->DoDecode(buffer_);
EXPECT_EQ(VideoDecoderImpl::kNormal, mock_decoder->state_);
EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp);
EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration);
@@ -393,7 +397,7 @@ TEST_F(VideoDecoderImplTest, DoDecode_TestStateTransition) {
// Decode a third time, with a regular buffer. The decode will error
// out, but the state should be the same.
- mock_decoder->DoDecode(buffer_, done_cb.CreateTask());
+ mock_decoder->DoDecode(buffer_);
EXPECT_EQ(VideoDecoderImpl::kNormal, mock_decoder->state_);
EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp);
EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration);
@@ -401,7 +405,7 @@ TEST_F(VideoDecoderImplTest, DoDecode_TestStateTransition) {
// Decode a fourth time, with an end of stream buffer. This should
// yield the second frame, and stay in flushing mode.
- mock_decoder->DoDecode(end_of_stream_buffer_, done_cb.CreateTask());
+ mock_decoder->DoDecode(end_of_stream_buffer_);
EXPECT_EQ(VideoDecoderImpl::kFlushCodec, mock_decoder->state_);
EXPECT_TRUE(kTestPts2.timestamp == mock_decoder->last_pts_.timestamp);
EXPECT_TRUE(kTestPts2.duration == mock_decoder->last_pts_.duration);
@@ -409,7 +413,7 @@ TEST_F(VideoDecoderImplTest, DoDecode_TestStateTransition) {
// Decode a fifth time with an end of stream buffer. this should
// yield the third frame.
- mock_decoder->DoDecode(end_of_stream_buffer_, done_cb.CreateTask());
+ mock_decoder->DoDecode(end_of_stream_buffer_);
EXPECT_EQ(VideoDecoderImpl::kFlushCodec, mock_decoder->state_);
EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp);
EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration);
@@ -417,7 +421,7 @@ TEST_F(VideoDecoderImplTest, DoDecode_TestStateTransition) {
// Decode a sixth time with an end of stream buffer. This should
// Move into kDecodeFinished.
- mock_decoder->DoDecode(end_of_stream_buffer_, done_cb.CreateTask());
+ mock_decoder->DoDecode(end_of_stream_buffer_);
EXPECT_EQ(VideoDecoderImpl::kDecodeFinished, mock_decoder->state_);
EXPECT_TRUE(kTestPts1.timestamp == mock_decoder->last_pts_.timestamp);
EXPECT_TRUE(kTestPts1.duration == mock_decoder->last_pts_.duration);
@@ -435,15 +439,11 @@ TEST_F(VideoDecoderImplTest, DoDecode_FinishEnqueuesEmptyFrames) {
// Expect 2 calls, make two calls. If kDecodeFinished is set, the buffer is
// not even examined.
EXPECT_CALL(*mock_decoder, EnqueueEmptyFrame()).Times(3);
+ EXPECT_CALL(*mock_decoder, OnEmptyBufferDone()).Times(3);
- // Setup callbacks to be executed 3 times.
- TaskMocker done_cb;
- EXPECT_CALL(done_cb, Run()).Times(3);
-
- mock_decoder->DoDecode(NULL, done_cb.CreateTask());
- mock_decoder->DoDecode(buffer_, done_cb.CreateTask());
- mock_decoder->DoDecode(end_of_stream_buffer_, done_cb.CreateTask());
-
+ mock_decoder->DoDecode(NULL);
+ mock_decoder->DoDecode(buffer_);
+ mock_decoder->DoDecode(end_of_stream_buffer_);
EXPECT_EQ(VideoDecoderImpl::kDecodeFinished, mock_decoder->state_);
}