author     jiesun@google.com <jiesun@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2010-04-23 16:24:46 +0000
committer  jiesun@google.com <jiesun@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2010-04-23 16:24:46 +0000
commit     f044ce8e4aeef63eb7686345f407bf5640045c18 (patch)
tree       1e990d0c5cb32cecd1b130e1c1cbf158f0dc3ab3 /media
parent     4b4762a2b618580183e2a80b66eae64cb500da75 (diff)
Remove AVFrame dependency.
Currently AVFrame is used in both OmxVideoDecodeEngine and FFmpegVideoDecodeEngine, which forces an unnecessary memory copy from AVFrame to VideoFrame. We can remove that extra copy by having the engines produce VideoFrame directly.
Also, in the hardware (OpenMAX) path VideoFrames are allocated inside the decode engine, so the interface was changed: VideoFrames are now created inside the decode engines and only buffered in VideoDecoderImpl.
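For orientation, here is a minimal standalone sketch of the reworked engine interface described above. It mirrors the DecodeFrame() signature in the patch below, but uses std::shared_ptr and std::function as stand-ins for Chromium's scoped_refptr and Task, and reduces Buffer/VideoFrame to empty stubs, so it is illustrative rather than the actual media/ code:

```cpp
#include <functional>
#include <memory>

// Stub stand-ins for media::Buffer and media::VideoFrame.
struct Buffer {};
struct VideoFrame {};

class VideoDecodeEngine {
 public:
  virtual ~VideoDecodeEngine() = default;

  // The engine now creates and fills |video_frame| itself and reports success
  // via |got_result|; previously the caller supplied an FFmpeg AVFrame and
  // copied it into a VideoFrame afterwards.
  virtual void DecodeFrame(Buffer* buffer,
                           std::shared_ptr<VideoFrame>* video_frame,
                           bool* got_result,
                           std::function<void()> done_cb) = 0;
};
```

Having the engine own frame creation is what lets the OpenMAX path hand back frames it allocated itself, while the FFmpeg path copies out of the decoder's reused internal buffers exactly once.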
BUGS=none
TEST=none
Review URL: http://codereview.chromium.org/1669002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@45448 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r--  media/base/video_frame.cc                            |   1
-rw-r--r--  media/base/video_frame.h                             |  11
-rw-r--r--  media/filters/ffmpeg_video_decode_engine.cc          |  92
-rw-r--r--  media/filters/ffmpeg_video_decode_engine.h           |   6
-rw-r--r--  media/filters/ffmpeg_video_decode_engine_unittest.cc |  73
-rw-r--r--  media/filters/omx_video_decode_engine.cc             |  50
-rw-r--r--  media/filters/omx_video_decode_engine.h              |   8
-rw-r--r--  media/filters/video_decode_engine.h                  |   3
-rw-r--r--  media/filters/video_decoder_impl.cc                  | 116
-rw-r--r--  media/filters/video_decoder_impl.h                   |  14
-rw-r--r--  media/filters/video_decoder_impl_unittest.cc         | 106
11 files changed, 250 insertions(+), 230 deletions(-)
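The FFmpeg engine change below moves the YV12 plane copy into the decode engine. As a self-contained illustration of what that copy does, here is a sketch under simplified assumptions: the helper name CopyPlaneSketch and its raw-pointer signature are invented for illustration, whereas the patch's actual CopyPlane() takes a VideoFrame and an AVFrame:

```cpp
#include <cstddef>
#include <cstring>

// Copies one plane of a decoded picture row by row, honoring the source and
// destination strides. For 4:2:0 (YV12) chroma planes, each row carries half
// the bytes of a luma row, and there are half as many rows.
static void CopyPlaneSketch(const unsigned char* source, size_t source_stride,
                            unsigned char* dest, size_t dest_stride,
                            size_t width, size_t height, bool is_chroma) {
  size_t bytes_per_line = width;
  size_t copy_lines = height;
  if (is_chroma) {
    bytes_per_line /= 2;                // Horizontal chroma subsampling.
    copy_lines = (copy_lines + 1) / 2;  // Vertical subsampling for YV12.
  }
  for (size_t i = 0; i < copy_lines; ++i) {
    std::memcpy(dest, source, bytes_per_line);
    source += source_stride;
    dest += dest_stride;
  }
}
```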
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 0b8239c..6767ee4 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -152,6 +152,7 @@ VideoFrame::VideoFrame(VideoFrame::Format format,
   width_ = width;
   height_ = height;
   planes_ = 0;
+  repeat_count_ = 0;
   memset(&strides_, 0, sizeof(strides_));
   memset(&data_, 0, sizeof(data_));
 }
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index b84d77c..5341c16 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -72,6 +72,14 @@ class VideoFrame : public StreamSample {
   // StreamSample interface.
   virtual bool IsEndOfStream() const;
 
+  int GetRepeatCount() const {
+    return repeat_count_;
+  }
+
+  void SetRepeatCount(int repeat_count) {
+    repeat_count_ = repeat_count;
+  }
+
  private:
   // Clients must use the static CreateFrame() method to create a new frame.
   VideoFrame(Format format,
@@ -103,6 +111,9 @@ class VideoFrame : public StreamSample {
   // Array of data pointers to each plane.
   uint8* data_[kMaxPlanes];
 
+  // Display meta data
+  int repeat_count_;
+
   DISALLOW_COPY_AND_ASSIGN(VideoFrame);
 };
diff --git a/media/filters/ffmpeg_video_decode_engine.cc b/media/filters/ffmpeg_video_decode_engine.cc
index 8685fbf..d13a247 100644
--- a/media/filters/ffmpeg_video_decode_engine.cc
+++ b/media/filters/ffmpeg_video_decode_engine.cc
@@ -5,8 +5,9 @@
 #include "media/filters/ffmpeg_video_decode_engine.h"
 
 #include "base/task.h"
-#include "media/base/callback.h"
 #include "media/base/buffers.h"
+#include "media/base/callback.h"
+#include "media/base/limits.h"
 #include "media/ffmpeg/ffmpeg_common.h"
 #include "media/ffmpeg/ffmpeg_util.h"
 #include "media/filters/ffmpeg_demuxer.h"
@@ -50,21 +51,54 @@ void FFmpegVideoDecodeEngine::Initialize(AVStream* stream, Task* done_cb) {
   int decode_threads = (codec_context_->codec_id == CODEC_ID_THEORA) ?
       1 : kDecodeThreads;
 
+  // We don't allocate AVFrame on the stack since different versions of FFmpeg
+  // may change the size of AVFrame, causing stack corruption. The solution is
+  // to let FFmpeg allocate the structure via avcodec_alloc_frame().
+  av_frame_.reset(avcodec_alloc_frame());
+
   if (codec &&
       avcodec_thread_init(codec_context_, decode_threads) >= 0 &&
-      avcodec_open(codec_context_, codec) >= 0) {
+      avcodec_open(codec_context_, codec) >= 0 &&
+      av_frame_.get()) {
     state_ = kNormal;
   } else {
     state_ = kError;
   }
 }
 
+static void CopyPlane(size_t plane,
+                      scoped_refptr<VideoFrame> video_frame,
+                      const AVFrame* frame) {
+  DCHECK(video_frame->width() % 2 == 0);
+  const uint8* source = frame->data[plane];
+  const size_t source_stride = frame->linesize[plane];
+  uint8* dest = video_frame->data(plane);
+  const size_t dest_stride = video_frame->stride(plane);
+  size_t bytes_per_line = video_frame->width();
+  size_t copy_lines = video_frame->height();
+  if (plane != VideoFrame::kYPlane) {
+    bytes_per_line /= 2;
+    if (video_frame->format() == VideoFrame::YV12) {
+      copy_lines = (copy_lines + 1) / 2;
+    }
+  }
+  DCHECK(bytes_per_line <= source_stride && bytes_per_line <= dest_stride);
+  for (size_t i = 0; i < copy_lines; ++i) {
+    memcpy(dest, source, bytes_per_line);
+    source += source_stride;
+    dest += dest_stride;
+  }
+}
+
 // Decodes one frame of video with the given buffer.
-void FFmpegVideoDecodeEngine::DecodeFrame(Buffer* buffer,
-                                          AVFrame* yuv_frame,
-                                          bool* got_frame,
-                                          Task* done_cb) {
+void FFmpegVideoDecodeEngine::DecodeFrame(
+    Buffer* buffer,
+    scoped_refptr<VideoFrame>* video_frame,
+    bool* got_frame,
+    Task* done_cb) {
   AutoTaskRunner done_runner(done_cb);
+  *got_frame = false;
+  *video_frame = NULL;
 
   // Create a packet for input data.
   // Due to FFmpeg API changes we no longer have const read-only pointers.
@@ -73,12 +107,11 @@ void FFmpegVideoDecodeEngine::DecodeFrame(Buffer* buffer,
   packet.data = const_cast<uint8*>(buffer->GetData());
   packet.size = buffer->GetDataSize();
 
-  // We don't allocate AVFrame on the stack since different versions of FFmpeg
-  // may change the size of AVFrame, causing stack corruption. The solution is
-  // to let FFmpeg allocate the structure via avcodec_alloc_frame().
   int frame_decoded = 0;
-  int result =
-      avcodec_decode_video2(codec_context_, yuv_frame, &frame_decoded, &packet);
+  int result = avcodec_decode_video2(codec_context_,
+                                     av_frame_.get(),
+                                     &frame_decoded,
+                                     &packet);
 
   // Log the problem if we can't decode a video frame and exit early.
   if (result < 0) {
@@ -92,6 +125,43 @@ void FFmpegVideoDecodeEngine::DecodeFrame(Buffer* buffer,
   } else {
     // If frame_decoded == 0, then no frame was produced.
     *got_frame = frame_decoded != 0;
+
+    if (*got_frame) {
+      // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
+      // The decoder is in a bad state and not decoding correctly.
+      // Checking for NULL avoids a crash in CopyPlane().
+      if (!av_frame_->data[VideoFrame::kYPlane] ||
+          !av_frame_->data[VideoFrame::kUPlane] ||
+          !av_frame_->data[VideoFrame::kVPlane]) {
+        // TODO(jiesun): this is also an error case handled as normal.
+        *got_frame = false;
+        return;
+      }
+
+      VideoFrame::CreateFrame(GetSurfaceFormat(),
+                              codec_context_->width,
+                              codec_context_->height,
+                              StreamSample::kInvalidTimestamp,
+                              StreamSample::kInvalidTimestamp,
+                              video_frame);
+      if (!video_frame->get()) {
+        // TODO(jiesun): this is also an error case handled as normal.
+        *got_frame = false;
+        return;
+      }
+
+      // Copy the frame data since FFmpeg reuses internal buffers for AVFrame
+      // output, meaning the data is only valid until the next
+      // avcodec_decode_video() call.
+      // TODO(scherkus): figure out pre-allocation/buffer cycling scheme.
+      // TODO(scherkus): is there a cleaner way to figure out the # of planes?
+      CopyPlane(VideoFrame::kYPlane, video_frame->get(), av_frame_.get());
+      CopyPlane(VideoFrame::kUPlane, video_frame->get(), av_frame_.get());
+      CopyPlane(VideoFrame::kVPlane, video_frame->get(), av_frame_.get());
+
+      DCHECK_LE(av_frame_->repeat_pict, 2);  // Sanity check.
+      (*video_frame)->SetRepeatCount(av_frame_->repeat_pict);
+    }
   }
 }
diff --git a/media/filters/ffmpeg_video_decode_engine.h b/media/filters/ffmpeg_video_decode_engine.h
index 8e5b2e3..afafd58 100644
--- a/media/filters/ffmpeg_video_decode_engine.h
+++ b/media/filters/ffmpeg_video_decode_engine.h
@@ -5,6 +5,8 @@
 #ifndef MEDIA_FILTERS_FFMPEG_VIDEO_DECODE_ENGINE_H_
 #define MEDIA_FILTERS_FFMPEG_VIDEO_DECODE_ENGINE_H_
 
+#include "base/scoped_ptr.h"
+#include "media/ffmpeg/ffmpeg_common.h"
 #include "media/filters/video_decode_engine.h"
 
 // FFmpeg types.
@@ -24,7 +26,8 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
   // Implementation of the VideoDecodeEngine Interface.
   virtual void Initialize(AVStream* stream, Task* done_cb);
-  virtual void DecodeFrame(Buffer* buffer, AVFrame* yuv_frame,
+  virtual void DecodeFrame(Buffer* buffer,
+                           scoped_refptr<VideoFrame>* video_frame,
                            bool* got_result, Task* done_cb);
   virtual void Flush(Task* done_cb);
   virtual VideoFrame::Format GetSurfaceFormat() const;
@@ -40,6 +43,7 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
  private:
   AVCodecContext* codec_context_;
   State state_;
+  scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> av_frame_;
 
   DISALLOW_COPY_AND_ASSIGN(FFmpegVideoDecodeEngine);
 };
diff --git a/media/filters/ffmpeg_video_decode_engine_unittest.cc b/media/filters/ffmpeg_video_decode_engine_unittest.cc
index 75e0c29..533edd0 100644
--- a/media/filters/ffmpeg_video_decode_engine_unittest.cc
+++ b/media/filters/ffmpeg_video_decode_engine_unittest.cc
@@ -20,12 +20,23 @@ using ::testing::StrictMock;
 
 namespace media {
 
+static const int kWidth = 320;
+static const int kHeight = 240;
+
 class FFmpegVideoDecodeEngineTest : public testing::Test {
  protected:
   FFmpegVideoDecodeEngineTest() {
     // Setup FFmpeg structures.
+    frame_buffer_.reset(new uint8[kWidth * kHeight]);
     memset(&yuv_frame_, 0, sizeof(yuv_frame_));
+    // DecodeFrame will check these pointers as non-NULL value.
+    yuv_frame_.data[0] = yuv_frame_.data[1] = yuv_frame_.data[2]
+        = frame_buffer_.get();
+    yuv_frame_.linesize[0] = kWidth;
+    yuv_frame_.linesize[1] = yuv_frame_.linesize[2] = kWidth >> 1;
     memset(&codec_context_, 0, sizeof(codec_context_));
+    codec_context_.width = kWidth;
+    codec_context_.height = kHeight;
     memset(&codec_, 0, sizeof(codec_));
     memset(&stream_, 0, sizeof(stream_));
     stream_.codec = &codec_context_;
@@ -40,11 +51,30 @@ class FFmpegVideoDecodeEngineTest : public testing::Test {
   }
 
   ~FFmpegVideoDecodeEngineTest() {
+    test_engine_.reset(NULL);
     MockFFmpeg::set(NULL);
   }
 
-  scoped_ptr<FFmpegVideoDecodeEngine> test_engine_;
+  void Initialize() {
+    EXPECT_CALL(*MockFFmpeg::get(), AVCodecFindDecoder(CODEC_ID_NONE))
+        .WillOnce(Return(&codec_));
+    EXPECT_CALL(*MockFFmpeg::get(), AVCodecAllocFrame())
+        .WillOnce(Return(&yuv_frame_));
+    EXPECT_CALL(*MockFFmpeg::get(), AVCodecThreadInit(&codec_context_, 2))
+        .WillOnce(Return(0));
+    EXPECT_CALL(*MockFFmpeg::get(), AVCodecOpen(&codec_context_, &codec_))
+        .WillOnce(Return(0));
+    EXPECT_CALL(*MockFFmpeg::get(), AVFree(&yuv_frame_))
+        .Times(1);
+
+    TaskMocker done_cb;
+    EXPECT_CALL(done_cb, Run());
+
+    test_engine_->Initialize(&stream_, done_cb.CreateTask());
+    EXPECT_EQ(VideoDecodeEngine::kNormal, test_engine_->state());
+  }
+
+  scoped_ptr<FFmpegVideoDecodeEngine> test_engine_;
+  scoped_array<uint8_t> frame_buffer_;
   StrictMock<MockFFmpeg> mock_ffmpeg_;
 
   AVFrame yuv_frame_;
@@ -61,25 +91,17 @@ TEST_F(FFmpegVideoDecodeEngineTest, Construction) {
 }
 
 TEST_F(FFmpegVideoDecodeEngineTest, Initialize_Normal) {
-  // Test avcodec_open() failing.
-  EXPECT_CALL(*MockFFmpeg::get(), AVCodecFindDecoder(CODEC_ID_NONE))
-      .WillOnce(Return(&codec_));
-  EXPECT_CALL(*MockFFmpeg::get(), AVCodecThreadInit(&codec_context_, 2))
-      .WillOnce(Return(0));
-  EXPECT_CALL(*MockFFmpeg::get(), AVCodecOpen(&codec_context_, &codec_))
-      .WillOnce(Return(0));
-
-  TaskMocker done_cb;
-  EXPECT_CALL(done_cb, Run());
-
-  test_engine_->Initialize(&stream_, done_cb.CreateTask());
-  EXPECT_EQ(VideoDecodeEngine::kNormal, test_engine_->state());
+  Initialize();
 }
 
 TEST_F(FFmpegVideoDecodeEngineTest, Initialize_FindDecoderFails) {
   // Test avcodec_find_decoder() returning NULL.
   EXPECT_CALL(*MockFFmpeg::get(), AVCodecFindDecoder(CODEC_ID_NONE))
       .WillOnce(ReturnNull());
+  EXPECT_CALL(*MockFFmpeg::get(), AVCodecAllocFrame())
+      .WillOnce(Return(&yuv_frame_));
+  EXPECT_CALL(*MockFFmpeg::get(), AVFree(&yuv_frame_))
+      .Times(1);
 
   TaskMocker done_cb;
   EXPECT_CALL(done_cb, Run());
@@ -92,8 +114,12 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_InitThreadFails) {
   // Test avcodec_thread_init() failing.
   EXPECT_CALL(*MockFFmpeg::get(), AVCodecFindDecoder(CODEC_ID_NONE))
       .WillOnce(Return(&codec_));
+  EXPECT_CALL(*MockFFmpeg::get(), AVCodecAllocFrame())
+      .WillOnce(Return(&yuv_frame_));
   EXPECT_CALL(*MockFFmpeg::get(), AVCodecThreadInit(&codec_context_, 2))
       .WillOnce(Return(-1));
+  EXPECT_CALL(*MockFFmpeg::get(), AVFree(&yuv_frame_))
+      .Times(1);
 
   TaskMocker done_cb;
   EXPECT_CALL(done_cb, Run());
@@ -106,10 +132,14 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) {
   // Test avcodec_open() failing.
   EXPECT_CALL(*MockFFmpeg::get(), AVCodecFindDecoder(CODEC_ID_NONE))
       .WillOnce(Return(&codec_));
+  EXPECT_CALL(*MockFFmpeg::get(), AVCodecAllocFrame())
+      .WillOnce(Return(&yuv_frame_));
   EXPECT_CALL(*MockFFmpeg::get(), AVCodecThreadInit(&codec_context_, 2))
       .WillOnce(Return(0));
   EXPECT_CALL(*MockFFmpeg::get(), AVCodecOpen(&codec_context_, &codec_))
       .WillOnce(Return(-1));
+  EXPECT_CALL(*MockFFmpeg::get(), AVFree(&yuv_frame_))
+      .Times(1);
 
   TaskMocker done_cb;
   EXPECT_CALL(done_cb, Run());
@@ -119,6 +149,8 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) {
 }
 
 TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_Normal) {
+  Initialize();
+
   // Expect a bunch of avcodec calls.
   EXPECT_CALL(mock_ffmpeg_, AVInitPacket(_));
   EXPECT_CALL(mock_ffmpeg_,
@@ -130,12 +162,15 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_Normal) {
   EXPECT_CALL(done_cb, Run());
 
   bool got_result;
-  test_engine_->DecodeFrame(buffer_, &yuv_frame_, &got_result,
+  scoped_refptr<VideoFrame> video_frame;
+  test_engine_->DecodeFrame(buffer_, &video_frame, &got_result,
                             done_cb.CreateTask());
   EXPECT_TRUE(got_result);
 }
 
 TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_0ByteFrame) {
+  Initialize();
+
   // Expect a bunch of avcodec calls.
   EXPECT_CALL(mock_ffmpeg_, AVInitPacket(_));
   EXPECT_CALL(mock_ffmpeg_,
@@ -147,12 +182,15 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_0ByteFrame) {
   EXPECT_CALL(done_cb, Run());
 
   bool got_result;
-  test_engine_->DecodeFrame(buffer_, &yuv_frame_, &got_result,
+  scoped_refptr<VideoFrame> video_frame;
+  test_engine_->DecodeFrame(buffer_, &video_frame, &got_result,
                             done_cb.CreateTask());
   EXPECT_FALSE(got_result);
 }
 
 TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_DecodeError) {
+  Initialize();
+
   // Expect a bunch of avcodec calls.
   EXPECT_CALL(mock_ffmpeg_, AVInitPacket(_));
   EXPECT_CALL(mock_ffmpeg_,
@@ -163,7 +201,8 @@ TEST_F(FFmpegVideoDecodeEngineTest, DecodeFrame_DecodeError) {
   EXPECT_CALL(done_cb, Run());
 
   bool got_result;
-  test_engine_->DecodeFrame(buffer_, &yuv_frame_, &got_result,
+  scoped_refptr<VideoFrame> video_frame;
+  test_engine_->DecodeFrame(buffer_, &video_frame, &got_result,
                             done_cb.CreateTask());
   EXPECT_FALSE(got_result);
 }
diff --git a/media/filters/omx_video_decode_engine.cc b/media/filters/omx_video_decode_engine.cc
index 6b456b2..e0db841 100644
--- a/media/filters/omx_video_decode_engine.cc
+++ b/media/filters/omx_video_decode_engine.cc
@@ -91,7 +91,7 @@ void OmxVideoDecodeEngine::OnHardwareError() {
 // decoder. So when a read complete callback is received, a corresponding
 // decode request must exist.
 void OmxVideoDecodeEngine::DecodeFrame(Buffer* buffer,
-                                       AVFrame* yuv_frame,
+                                       scoped_refptr<VideoFrame>* video_frame,
                                        bool* got_result,
                                        Task* done_cb) {
   DCHECK_EQ(message_loop_, MessageLoop::current());
@@ -110,7 +110,7 @@ void OmxVideoDecodeEngine::DecodeFrame(Buffer* buffer,
 
   // Enqueue the decode request and the associated buffer.
   decode_request_queue_.push_back(
-      DecodeRequest(yuv_frame, got_result, done_cb));
+      DecodeRequest(video_frame, got_result, done_cb));
 
   // Submit a read request to the decoder.
   omx_codec_->Read(NewCallback(this, &OmxVideoDecodeEngine::OnReadComplete));
@@ -178,46 +178,48 @@ void OmxVideoDecodeEngine::BufferReady(int buffer_id,
     MergeBytesFrameQueue(buffer, size);
   }
 
+  // Notify OmxCodec that we have finished using this buffer.
+  if (callback) {
+    callback->Run(buffer_id);
+    delete callback;
+  }
+
   // We assume that when we receive a read complete callback from
   // OmxCodec there was a read request made.
   CHECK(!decode_request_queue_.empty());
 
-  const DecodeRequest request = decode_request_queue_.front();
+  DecodeRequest request = decode_request_queue_.front();
   decode_request_queue_.pop_front();
   *request.got_result = false;
 
+  // Notify the read request to this object has been fulfilled.
+  AutoTaskRunner done_cb_runner(request.done_cb);
+
   // Detect if we have received a full decoded frame.
   if (DecodedFrameAvailable()) {
     // |frame| carries the decoded frame.
     scoped_ptr<YuvFrame> frame(yuv_frame_queue_.front());
     yuv_frame_queue_.pop_front();
+
+    VideoFrame::CreateFrame(GetSurfaceFormat(),
+                            width_, height_,
+                            StreamSample::kInvalidTimestamp,
+                            StreamSample::kInvalidTimestamp,
+                            request.frame);
+    if (!request.frame->get()) {
+      // TODO(jiesun): this is also an error case handled as normal.
+      return;
+    }
     *request.got_result = true;
 
-    // TODO(ajwong): This is a memcpy(). Avoid this.
-    // TODO(ajwong): This leaks memory. Fix by not using AVFrame.
+    // TODO(jiesun): Assume YUV 420 format.
     const int pixels = width_ * height_;
-    request.frame->data[0] = y_buffer_.get();
-    request.frame->data[1] = u_buffer_.get();
-    request.frame->data[2] = v_buffer_.get();
-    request.frame->linesize[0] = width_;
-    request.frame->linesize[1] = width_ / 2;
-    request.frame->linesize[2] = width_ / 2;
-
-    memcpy(request.frame->data[0], frame->data, pixels);
-    memcpy(request.frame->data[1], frame->data + pixels,
+    memcpy((*request.frame)->data(VideoFrame::kYPlane), frame->data, pixels);
+    memcpy((*request.frame)->data(VideoFrame::kUPlane), frame->data + pixels,
            pixels / 4);
-    memcpy(request.frame->data[2],
+    memcpy((*request.frame)->data(VideoFrame::kVPlane),
           frame->data + pixels + pixels /4,
           pixels / 4);
   }
-
-  // Notify the read request to this object has been fulfilled.
-  request.done_cb->Run();
-
-  // Notify OmxCodec that we have finished using this buffer.
-  if (callback) {
-    callback->Run(buffer_id);
-    delete callback;
-  }
 }
 
 void OmxVideoDecodeEngine::OnReadComplete(
diff --git a/media/filters/omx_video_decode_engine.h b/media/filters/omx_video_decode_engine.h
index e5f7946..273006c 100644
--- a/media/filters/omx_video_decode_engine.h
+++ b/media/filters/omx_video_decode_engine.h
@@ -18,7 +18,6 @@ class MessageLoop;
 
 // FFmpeg types.
-struct AVFrame;
 struct AVRational;
 struct AVStream;
 
@@ -34,7 +33,8 @@ class OmxVideoDecodeEngine : public VideoDecodeEngine,
   // Implementation of the VideoDecodeEngine Interface.
   virtual void Initialize(AVStream* stream, Task* done_cb);
-  virtual void DecodeFrame(Buffer* buffer, AVFrame* yuv_frame,
+  virtual void DecodeFrame(Buffer* buffer,
+                           scoped_refptr<VideoFrame>* video_frame,
                            bool* got_result, Task* done_cb);
   virtual void Flush(Task* done_cb);
   virtual VideoFrame::Format GetSurfaceFormat() const;
@@ -83,13 +83,13 @@ class OmxVideoDecodeEngine : public VideoDecodeEngine,
   // A struct to hold parameters of a decode request. Objects pointed by
   // these parameters are owned by the caller.
   struct DecodeRequest {
-    DecodeRequest(AVFrame* f, bool* b, Task* cb)
+    DecodeRequest(scoped_refptr<VideoFrame>* f, bool* b, Task* cb)
         : frame(f),
           got_result(b),
          done_cb(cb) {
    }
 
-    AVFrame* frame;
+    scoped_refptr<VideoFrame>* frame;
     bool* got_result;
     Task* done_cb;
   };
diff --git a/media/filters/video_decode_engine.h b/media/filters/video_decode_engine.h
index 9f05ab5..975ecea 100644
--- a/media/filters/video_decode_engine.h
+++ b/media/filters/video_decode_engine.h
@@ -40,7 +40,8 @@ class VideoDecodeEngine {
   //
   // TODO(ajwong): Should this function just allocate a new yuv_frame and return
   // it via a "GetNextFrame()" method or similar?
-  virtual void DecodeFrame(Buffer* buffer, AVFrame* yuv_frame,
+  virtual void DecodeFrame(Buffer* buffer,
+                           scoped_refptr<VideoFrame>* video_frame,
                            bool* got_result, Task* done_cb) = 0;
 
   // Flushes the decode engine of any buffered input packets.
diff --git a/media/filters/video_decoder_impl.cc b/media/filters/video_decoder_impl.cc
index 9592dbb..a4a655e 100644
--- a/media/filters/video_decoder_impl.cc
+++ b/media/filters/video_decoder_impl.cc
@@ -139,31 +139,31 @@ void VideoDecoderImpl::DoDecode(Buffer* buffer, Task* done_cb) {
   }
 
   // Otherwise, attempt to decode a single frame.
-  AVFrame* yuv_frame = avcodec_alloc_frame();
   bool* got_frame = new bool;
+  scoped_refptr<VideoFrame>* video_frame = new scoped_refptr<VideoFrame>(NULL);
   decode_engine_->DecodeFrame(
       buffer,
-      yuv_frame,
+      video_frame,
       got_frame,
       NewRunnableMethod(this,
                         &VideoDecoderImpl::OnDecodeComplete,
-                        yuv_frame,
+                        video_frame,
                         got_frame,
                         done_runner.release()));
 }
 
-void VideoDecoderImpl::OnDecodeComplete(AVFrame* yuv_frame, bool* got_frame,
-                                        Task* done_cb) {
+void VideoDecoderImpl::OnDecodeComplete(scoped_refptr<VideoFrame>* video_frame,
+                                        bool* got_frame, Task* done_cb) {
   // Note: The |done_runner| must be declared *last* to ensure proper
   // destruction order.
-  scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> yuv_frame_deleter(yuv_frame);
   scoped_ptr<bool> got_frame_deleter(got_frame);
+  scoped_ptr<scoped_refptr<VideoFrame> > video_frame_deleter(video_frame);
   AutoTaskRunner done_runner(done_cb);
 
   // If we actually got data back, enqueue a frame.
   if (*got_frame) {
     last_pts_ = FindPtsAndDuration(*time_base_, pts_heap_, last_pts_,
-                                   yuv_frame);
+                                   video_frame->get());
 
     // Pop off a pts on a successful decode since we are "using up" one
     // timestamp.
@@ -179,12 +179,9 @@ void VideoDecoderImpl::OnDecodeComplete(AVFrame* yuv_frame, bool* got_frame,
       NOTREACHED() << "Attempting to decode more frames than were input.";
     }
 
-    if (!EnqueueVideoFrame(
-            decode_engine_->GetSurfaceFormat(), last_pts_, yuv_frame)) {
-      // On an EnqueueEmptyFrame error, error out the whole pipeline and
-      // set the state to kDecodeFinished.
-      SignalPipelineError();
-    }
+    (*video_frame)->SetTimestamp(last_pts_.timestamp);
+    (*video_frame)->SetDuration(last_pts_.duration);
+    EnqueueVideoFrame(*video_frame);
   } else {
     // When in kFlushCodec, any errored decode, or a 0-lengthed frame,
     // is taken as a signal to stop decoding.
@@ -195,59 +192,9 @@ void VideoDecoderImpl::OnDecodeComplete(AVFrame* yuv_frame, bool* got_frame,
   }
 }
 
-bool VideoDecoderImpl::EnqueueVideoFrame(VideoFrame::Format surface_format,
-                                         const TimeTuple& time,
-                                         const AVFrame* frame) {
-  // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
-  // The decoder is in a bad state and not decoding correctly.
-  // Checking for NULL avoids a crash in CopyPlane().
-  if (!frame->data[VideoFrame::kYPlane] ||
-      !frame->data[VideoFrame::kUPlane] ||
-      !frame->data[VideoFrame::kVPlane]) {
-    return true;
-  }
-
-  scoped_refptr<VideoFrame> video_frame;
-  VideoFrame::CreateFrame(surface_format, width_, height_,
-                          time.timestamp, time.duration, &video_frame);
-  if (!video_frame) {
-    return false;
-  }
-
-  // Copy the frame data since FFmpeg reuses internal buffers for AVFrame
-  // output, meaning the data is only valid until the next
-  // avcodec_decode_video() call.
-  // TODO(scherkus): figure out pre-allocation/buffer cycling scheme.
-  // TODO(scherkus): is there a cleaner way to figure out the # of planes?
-  CopyPlane(VideoFrame::kYPlane, *video_frame, frame);
-  CopyPlane(VideoFrame::kUPlane, *video_frame, frame);
-  CopyPlane(VideoFrame::kVPlane, *video_frame, frame);
+void VideoDecoderImpl::EnqueueVideoFrame(
+    const scoped_refptr<VideoFrame>& video_frame) {
   EnqueueResult(video_frame);
-  return true;
-}
-
-void VideoDecoderImpl::CopyPlane(size_t plane,
-                                 const VideoFrame& video_frame,
-                                 const AVFrame* frame) {
-  DCHECK(video_frame.width() % 2 == 0);
-  const uint8* source = frame->data[plane];
-  const size_t source_stride = frame->linesize[plane];
-  uint8* dest = video_frame.data(plane);
-  const size_t dest_stride = video_frame.stride(plane);
-  size_t bytes_per_line = video_frame.width();
-  size_t copy_lines = video_frame.height();
-  if (plane != VideoFrame::kYPlane) {
-    bytes_per_line /= 2;
-    if (video_frame.format() == VideoFrame::YV12) {
-      copy_lines = (copy_lines + 1) / 2;
-    }
-  }
-  DCHECK(bytes_per_line <= source_stride && bytes_per_line <= dest_stride);
-  for (size_t i = 0; i < copy_lines; ++i) {
-    memcpy(dest, source, bytes_per_line);
-    source += source_stride;
-    dest += dest_stride;
-  }
 }
 
 void VideoDecoderImpl::EnqueueEmptyFrame() {
@@ -260,29 +207,24 @@ VideoDecoderImpl::TimeTuple VideoDecoderImpl::FindPtsAndDuration(
     const AVRational& time_base,
     const PtsHeap& pts_heap,
     const TimeTuple& last_pts,
-    const AVFrame* frame) {
+    const VideoFrame* frame) {
   TimeTuple pts;
 
-  // Default |repeat_pict| to 0 because if there is no frame information,
-  // we just assume the frame only plays for one time_base.
-  int repeat_pict = 0;
+  // First search the VideoFrame for the pts. This is the most authoritative.
+  // Make a special exclusion for the value pts == 0. Though this is
+  // technically a valid value, it seems a number of ffmpeg codecs will
+  // mistakenly always set pts to 0.
+  DCHECK(frame);
+  base::TimeDelta timestamp = frame->GetTimestamp();
+  if (timestamp != StreamSample::kInvalidTimestamp &&
+      timestamp.ToInternalValue() != 0) {
+    pts.timestamp = ConvertTimestamp(time_base, timestamp.ToInternalValue());
+    pts.duration = ConvertTimestamp(time_base, 1 + frame->GetRepeatCount());
+    return pts;
+  }
 
-  // First search the AVFrame for the pts. This is the most authoritative.
-  // Make a special exclusion for the value frame->pts == 0. Though this
-  // is technically a valid value, it seems a number of ffmpeg codecs will
-  // mistakenly always set frame->pts to 0.
-  //
-  // Oh, and we have to cast AV_NOPTS_VALUE since it ends up becoming unsigned
-  // because the value they use doesn't fit in a signed 64-bit number which
-  // produces a signedness comparison warning on gcc.
-  if (frame &&
-      (frame->pts != static_cast<int64_t>(AV_NOPTS_VALUE)) &&
-      (frame->pts != 0)) {
-    pts.timestamp = ConvertTimestamp(time_base, frame->pts);
-    repeat_pict = frame->repeat_pict;
-  } else if (!pts_heap.IsEmpty()) {
-    // If the frame did not have pts, try to get the pts from the
-    // |pts_heap|.
+  if (!pts_heap.IsEmpty()) {
+    // If the frame did not have pts, try to get the pts from the |pts_heap|.
     pts.timestamp = pts_heap.Top();
   } else {
     DCHECK(last_pts.timestamp != StreamSample::kInvalidTimestamp);
@@ -292,9 +234,7 @@ VideoDecoderImpl::TimeTuple VideoDecoderImpl::FindPtsAndDuration(
   }
 
   // Fill in the duration while accounting for repeated frames.
-  //
-  // TODO(ajwong): Make sure this formula is correct.
-  pts.duration = ConvertTimestamp(time_base, 1 + repeat_pict);
+  pts.duration = ConvertTimestamp(time_base, 1);
 
   return pts;
 }
diff --git a/media/filters/video_decoder_impl.h b/media/filters/video_decoder_impl.h
index 7f34912..033cd77 100644
--- a/media/filters/video_decoder_impl.h
+++ b/media/filters/video_decoder_impl.h
@@ -12,7 +12,6 @@
 #include "testing/gtest/include/gtest/gtest_prod.h"
 
 // FFmpeg types.
-struct AVFrame;
 struct AVRational;
 
 namespace media {
@@ -61,21 +60,16 @@ class VideoDecoderImpl : public DecoderBase<VideoDecoder, VideoFrame> {
   virtual void DoSeek(base::TimeDelta time, Task* done_cb);
   virtual void DoDecode(Buffer* buffer, Task* done_cb);
 
-  virtual bool EnqueueVideoFrame(VideoFrame::Format surface_format,
-                                 const TimeTuple& time,
-                                 const AVFrame* frame);
+  virtual void EnqueueVideoFrame(const scoped_refptr<VideoFrame>& video_frame);
 
   // Create an empty video frame and queue it.
   virtual void EnqueueEmptyFrame();
 
-  virtual void CopyPlane(size_t plane, const VideoFrame& video_frame,
-                         const AVFrame* frame);
-
   // Methods that pickup after the decode engine has finished its action.
   virtual void OnInitializeComplete(bool* success /* Not owned */,
                                     Task* done_cb);
-  virtual void OnDecodeComplete(AVFrame* yuv_frame, bool* got_frame,
-                                Task* done_cb);
+  virtual void OnDecodeComplete(scoped_refptr<VideoFrame>* video_frame,
+                                bool* got_frame, Task* done_cb);
 
   // Attempt to get the PTS and Duration for this frame by examining the time
   // info provided via packet stream (stored in |pts_heap|), or the info
@@ -88,7 +82,7 @@ class VideoDecoderImpl : public DecoderBase<VideoDecoder, VideoFrame> {
   virtual TimeTuple FindPtsAndDuration(const AVRational& time_base,
                                        const PtsHeap& pts_heap,
                                        const TimeTuple& last_pts,
-                                       const AVFrame* frame);
+                                       const VideoFrame* frame);
 
   // Signals the pipeline that a decode error occurs, and moves the decoder
   // into the kDecodeFinished state.
diff --git a/media/filters/video_decoder_impl_unittest.cc b/media/filters/video_decoder_impl_unittest.cc
index 4ab6d65..7c321c5 100644
--- a/media/filters/video_decoder_impl_unittest.cc
+++ b/media/filters/video_decoder_impl_unittest.cc
@@ -13,6 +13,7 @@
 #include "media/base/mock_filter_host.h"
 #include "media/base/mock_filters.h"
 #include "media/base/mock_task.h"
+#include "media/base/video_frame.h"
 #include "media/ffmpeg/ffmpeg_common.h"
 #include "media/filters/ffmpeg_interfaces.h"
 #include "media/filters/ffmpeg_video_decoder.h"
@@ -49,7 +50,8 @@ class MockFFmpegDemuxerStream : public MockDemuxerStream,
 class MockVideoDecodeEngine : public VideoDecodeEngine {
  public:
   MOCK_METHOD2(Initialize, void(AVStream* stream, Task* done_cb));
-  MOCK_METHOD4(DecodeFrame, void(Buffer* buffer, AVFrame* yuv_frame,
+  MOCK_METHOD4(DecodeFrame, void(Buffer* buffer,
+                                 scoped_refptr<VideoFrame>* video_frame,
                                  bool* got_result, Task* done_cb));
   MOCK_METHOD1(Flush, void(Task* done_cb));
   MOCK_CONST_METHOD0(state, State());
@@ -63,16 +65,17 @@ class DecoderPrivateMock : public VideoDecoderImpl {
       : VideoDecoderImpl(engine) {
   }
 
-  MOCK_METHOD3(EnqueueVideoFrame, bool(VideoFrame::Format surface_format,
-                                       const TimeTuple& time,
-                                       const AVFrame* frame));
+  MOCK_METHOD1(EnqueueVideoFrame,
+               void(const scoped_refptr<VideoFrame>& video_frame));
   MOCK_METHOD0(EnqueueEmptyFrame, void());
-  MOCK_METHOD3(CopyPlane, void(size_t plane, const VideoFrame* video_frame,
+  MOCK_METHOD3(CopyPlane, void(size_t plane,
+                               const scoped_refptr<VideoFrame> video_frame,
                                const AVFrame* frame));
-  MOCK_METHOD4(FindPtsAndDuration, TimeTuple(const AVRational& time_base,
-                                             const PtsHeap& pts_heap,
-                                             const TimeTuple& last_pts,
-                                             const AVFrame* frame));
+  MOCK_METHOD4(FindPtsAndDuration, TimeTuple(
+      const AVRational& time_base,
+      const PtsHeap& pts_heap,
+      const TimeTuple& last_pts,
+      const VideoFrame* frame));
   MOCK_METHOD0(SignalPipelineError, void());
 };
 
@@ -110,6 +113,9 @@ class VideoDecoderImplTest : public testing::Test {
     memset(&codec_context_, 0, sizeof(codec_context_));
     memset(&codec_, 0, sizeof(codec_));
     memset(&yuv_frame_, 0, sizeof(yuv_frame_));
+    base::TimeDelta zero;
+    VideoFrame::CreateFrame(VideoFrame::YV12, kWidth, kHeight,
+                            zero, zero, &video_frame_);
 
     stream_.codec = &codec_context_;
     codec_context_.width = kWidth;
@@ -148,6 +154,7 @@ class VideoDecoderImplTest : public testing::Test {
   AVCodecContext codec_context_;
   AVCodec codec_;
   AVFrame yuv_frame_;
+  scoped_refptr<VideoFrame> video_frame_;
   StrictMock<MockFFmpeg> mock_ffmpeg_;
 
  private:
@@ -262,27 +269,21 @@ TEST_F(VideoDecoderImplTest, FindPtsAndDuration) {
   last_pts.duration = base::TimeDelta::FromMicroseconds(16);
 
   // Simulate an uninitialized yuv_frame.
-  yuv_frame_.pts = AV_NOPTS_VALUE;
+  video_frame_->SetTimestamp(
+      base::TimeDelta::FromMicroseconds(AV_NOPTS_VALUE));
 
   VideoDecoderImpl::TimeTuple result_pts =
-      decoder_->FindPtsAndDuration(time_base, pts_heap, last_pts, &yuv_frame_);
-  EXPECT_EQ(116, result_pts.timestamp.InMicroseconds());
-  EXPECT_EQ(500000, result_pts.duration.InMicroseconds());
-
-  // Test that providing no frame has the same result as an uninitialized
-  // frame.
-  result_pts = decoder_->FindPtsAndDuration(time_base,
-                                            pts_heap,
-                                            last_pts,
-                                            NULL);
+      decoder_->FindPtsAndDuration(time_base, pts_heap,
+                                   last_pts, video_frame_.get());
   EXPECT_EQ(116, result_pts.timestamp.InMicroseconds());
   EXPECT_EQ(500000, result_pts.duration.InMicroseconds());
 
   // Test that having pts == 0 in the frame also behaves like the pts is not
   // provided. This is because FFmpeg set the pts to zero when there is no
   // data for the frame, which means that value is useless to us.
-  yuv_frame_.pts = 0;
+  video_frame_->SetTimestamp(base::TimeDelta::FromMicroseconds(0));
   result_pts =
-      decoder_->FindPtsAndDuration(time_base, pts_heap, last_pts, &yuv_frame_);
+      decoder_->FindPtsAndDuration(time_base, pts_heap,
+                                   last_pts, video_frame_.get());
   EXPECT_EQ(116, result_pts.timestamp.InMicroseconds());
   EXPECT_EQ(500000, result_pts.duration.InMicroseconds());
 
@@ -291,17 +292,17 @@ TEST_F(VideoDecoderImplTest, FindPtsAndDuration) {
   result_pts = decoder_->FindPtsAndDuration(time_base,
                                             pts_heap,
                                             last_pts,
-                                            &yuv_frame_);
+                                            video_frame_.get());
   EXPECT_EQ(123, result_pts.timestamp.InMicroseconds());
   EXPECT_EQ(500000, result_pts.duration.InMicroseconds());
 
   // Add a pts into the frame and make sure it overrides the timequeue.
-  yuv_frame_.pts = 333;
-  yuv_frame_.repeat_pict = 2;
+  video_frame_->SetTimestamp(base::TimeDelta::FromMicroseconds(333));
+  video_frame_->SetRepeatCount(2);
   result_pts = decoder_->FindPtsAndDuration(time_base,
                                             pts_heap,
                                             last_pts,
-                                            &yuv_frame_);
+                                            video_frame_.get());
   EXPECT_EQ(166500000, result_pts.timestamp.InMicroseconds());
   EXPECT_EQ(1500000, result_pts.duration.InMicroseconds());
 }
@@ -326,40 +327,28 @@ TEST_F(VideoDecoderImplTest, DoDecode_TestStateTransition) {
   EXPECT_CALL(*mock_engine, DecodeFrame(_, _, _,_))
       .WillOnce(DoAll(SetArgumentPointee<2>(false),
                       WithArg<3>(InvokeRunnable())))
-      .WillOnce(DoAll(SetArgumentPointee<1>(yuv_frame_),
+      .WillOnce(DoAll(SetArgumentPointee<1>(video_frame_),
                       SetArgumentPointee<2>(true),
                       WithArg<3>(InvokeRunnable())))
       .WillOnce(DoAll(SetArgumentPointee<2>(false),
                       WithArg<3>(InvokeRunnable())))
-      .WillOnce(DoAll(SetArgumentPointee<1>(yuv_frame_),
+      .WillOnce(DoAll(SetArgumentPointee<1>(video_frame_),
                       SetArgumentPointee<2>(true),
                       WithArg<3>(InvokeRunnable())))
-      .WillOnce(DoAll(SetArgumentPointee<1>(yuv_frame_),
+      .WillOnce(DoAll(SetArgumentPointee<1>(video_frame_),
                       SetArgumentPointee<2>(true),
                       WithArg<3>(InvokeRunnable())))
       .WillOnce(DoAll(SetArgumentPointee<2>(false),
                       WithArg<3>(InvokeRunnable())));
-  EXPECT_CALL(*mock_engine, GetSurfaceFormat())
-      .Times(3)
-      .WillRepeatedly(Return(VideoFrame::YV16));
   EXPECT_CALL(*mock_decoder, FindPtsAndDuration(_, _, _, _))
      .WillOnce(Return(kTestPts1))
      .WillOnce(Return(kTestPts2))
      .WillOnce(Return(kTestPts1));
-  EXPECT_CALL(*mock_decoder, EnqueueVideoFrame(_, _, _))
-      .Times(3)
-      .WillRepeatedly(Return(true));
+  EXPECT_CALL(*mock_decoder, EnqueueVideoFrame(_))
+      .Times(3);
   EXPECT_CALL(*mock_decoder, EnqueueEmptyFrame())
      .Times(1);
 
-  // Setup FFmpeg expectations for frame allocations. We do
-  // 6 decodes in this test.
-  EXPECT_CALL(mock_ffmpeg_, AVCodecAllocFrame())
-      .Times(6)
-      .WillRepeatedly(Return(&yuv_frame_));
-  EXPECT_CALL(mock_ffmpeg_, AVFree(&yuv_frame_))
-      .Times(6);
-
   // Setup callbacks to be executed 6 times.
   TaskMocker done_cb;
   EXPECT_CALL(done_cb, Run()).Times(6);
@@ -416,37 +405,6 @@ TEST_F(VideoDecoderImplTest, DoDecode_TestStateTransition) {
   EXPECT_TRUE(mock_decoder->pts_heap_.IsEmpty());
 }
 
-TEST_F(VideoDecoderImplTest, DoDecode_EnqueueVideoFrameError) {
-  MockVideoDecodeEngine* mock_engine = new StrictMock<MockVideoDecodeEngine>();
-  scoped_refptr<DecoderPrivateMock> mock_decoder =
-      new StrictMock<DecoderPrivateMock>(mock_engine);
-
-  // Setup decoder to decode one frame, but then fail on enqueue.
-  EXPECT_CALL(*mock_engine, DecodeFrame(_, _, _,_))
-      .WillOnce(DoAll(SetArgumentPointee<1>(yuv_frame_),
-                      SetArgumentPointee<2>(true),
-                      WithArg<3>(InvokeRunnable())));
-  EXPECT_CALL(*mock_engine, GetSurfaceFormat())
-      .WillOnce(Return(VideoFrame::YV16));
-  EXPECT_CALL(*mock_decoder, FindPtsAndDuration(_, _, _, _))
-      .WillOnce(Return(kTestPts1));
-  EXPECT_CALL(*mock_decoder, EnqueueVideoFrame(_, _, _))
-      .WillOnce(Return(false));
-  EXPECT_CALL(*mock_decoder, SignalPipelineError());
-
-  // Count the callback invoked.
-  TaskMocker done_cb;
-  EXPECT_CALL(done_cb, Run()).Times(1);
-
-  // Setup FFmpeg expectations for frame allocations.
-  EXPECT_CALL(mock_ffmpeg_, AVCodecAllocFrame())
-      .WillOnce(Return(&yuv_frame_));
-  EXPECT_CALL(mock_ffmpeg_, AVFree(&yuv_frame_));
-
-  // Attempt the decode.
-  mock_decoder->DoDecode(buffer_, done_cb.CreateTask());
-}
-
 TEST_F(VideoDecoderImplTest, DoDecode_FinishEnqueuesEmptyFrames) {
   MockVideoDecodeEngine* mock_engine = new StrictMock<MockVideoDecodeEngine>();
   scoped_refptr<DecoderPrivateMock> mock_decoder =