author     dalecurtis@google.com <dalecurtis@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2014-05-21 20:40:13 +0000
committer  dalecurtis@google.com <dalecurtis@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2014-05-21 20:40:13 +0000
commit     3400ccd29e1fc6503ec21a5a547747fa9782f0d0 (patch)
tree       ac8993ef477b6280dcbd788b65410e3007401db7 /media
parent     540266762787503b9fd166c1a5db6be00c2a0e1d (diff)
Roll FFmpeg for M37.
Syncs to head as of bebce653e5601ceafa004db0eb6b2c7d4d16f0c0. The
roll requires us to stop using some deprecated features:
- Replaces avcodec_get_frame_defaults() with av_frame_unref() per docs
  (sketched below).
- Switches FFmpegVideoDecoder to use ref-counted frames (see the second
  sketch, after the diffstat).
- Removes error concealment settings and disables error resilience
for all platforms except ChromeOS (since it's required for mpeg4).
Avoiding these also allows us to disable some deprecated features
wholesale via #defines.
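The first bullet, sketched: a minimal decode loop (not part of this commit;
the function and variable names are illustrative) showing the pattern the
media/ code moves to. Instead of re-initializing the frame with
avcodec_get_frame_defaults() before each decode call, the frame is released
with av_frame_unref() once each decoded frame has been consumed, and freed
with av_frame_free() rather than avcodec_free_frame(). The FFmpeg calls match
the era of this roll (avcodec_decode_audio4/av_free_packet are themselves
deprecated in later FFmpeg releases).

  // Illustrative sketch only; mirrors media/filters/audio_file_reader.cc after
  // this roll. Assumes |codec_context| is an opened audio decoder context and
  // |packet| was filled by av_read_frame().
  extern "C" {
  #include <libavcodec/avcodec.h>
  }

  static void DecodeAudioPacket(AVCodecContext* codec_context, AVPacket* packet) {
    AVFrame* frame = av_frame_alloc();
    // Shallow copy so packet->data/size can be advanced as frames are decoded
    // without corrupting the caller's packet, which still owns the buffer.
    AVPacket temp_packet = *packet;
    do {
      int frame_decoded = 0;
      int result = avcodec_decode_audio4(codec_context, frame, &frame_decoded,
                                         &temp_packet);
      if (result < 0)
        break;
      if (frame_decoded) {
        // ... consume frame->data[] / frame->nb_samples here ...
        av_frame_unref(frame);  // Replaces avcodec_get_frame_defaults().
      }
      temp_packet.size -= result;
      temp_packet.data += result;
    } while (temp_packet.size > 0);
    av_frame_free(&frame);  // Replaces avcodec_free_frame().
  }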
BUG=119020,236611
TEST=all tests (and regression tests) pass under asan
NOTRY=true
R=jrummell@chromium.org, wolenetz@chromium.org
Committed: https://src.chromium.org/viewvc/chrome?view=rev&revision=271945
Review URL: https://codereview.chromium.org/286953005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@271969 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r-- | media/base/audio_video_metadata_extractor_unittest.cc | 2
-rw-r--r-- | media/base/media_file_checker.cc | 6
-rw-r--r-- | media/cast/test/sender.cc | 14
-rw-r--r-- | media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc | 2
-rw-r--r-- | media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc | 2
-rw-r--r-- | media/ffmpeg/ffmpeg_common.h | 10
-rw-r--r-- | media/ffmpeg/ffmpeg_unittest.cc | 4
-rw-r--r-- | media/filters/audio_file_reader.cc | 4
-rw-r--r-- | media/filters/ffmpeg_audio_decoder.cc | 1
-rw-r--r-- | media/filters/ffmpeg_demuxer.cc | 10
-rw-r--r-- | media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc | 8
-rw-r--r-- | media/filters/ffmpeg_video_decoder.cc | 77
-rw-r--r-- | media/filters/ffmpeg_video_decoder.h | 6
-rw-r--r-- | media/filters/pipeline_integration_test_base.cc | 3
14 files changed, 75 insertions, 74 deletions
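Before the diff itself, a simplified sketch (not from the commit) of the
get_buffer2/AVBufferRef ownership model that the FFmpegVideoDecoder change
below adopts: the decoder asks the callback for a buffer, the callback wraps
its own allocation in an AVBufferRef whose free callback fires when the last
frame reference is dropped, and the buffer's opaque pointer carries the owning
object (a scoped_refptr<VideoFrame> in the real code; a hypothetical
MyFrameBacking struct here). Helper names use current libavutil
(av_image_get_buffer_size/av_image_fill_arrays); the 2014-era code computes
sizes via VideoFrame::AllocationSize and handles coded-size alignment, which
this sketch omits.

  // Illustrative only; stands in for FFmpegVideoDecoder::GetVideoBuffer() and
  // ReleaseVideoBufferImpl() in the diff below. MyFrameBacking is hypothetical.
  extern "C" {
  #include <libavcodec/avcodec.h>
  #include <libavutil/imgutils.h>
  }
  #include <cstdlib>

  struct MyFrameBacking {  // Stand-in for scoped_refptr<VideoFrame>.
    uint8_t* pixels;
  };

  static void ReleaseBuffer(void* opaque, uint8_t* /*data*/) {
    MyFrameBacking* backing = static_cast<MyFrameBacking*>(opaque);
    free(backing->pixels);  // Last AVFrame reference is gone.
    delete backing;
  }

  static int GetBuffer2(AVCodecContext* context, AVFrame* frame, int /*flags*/) {
    const AVPixelFormat format = static_cast<AVPixelFormat>(frame->format);
    const int size =
        av_image_get_buffer_size(format, frame->width, frame->height, 32);
    if (size < 0)
      return size;
    MyFrameBacking* backing = new MyFrameBacking();
    backing->pixels = static_cast<uint8_t*>(malloc(size));
    if (!backing->pixels) {
      delete backing;
      return AVERROR(ENOMEM);
    }
    av_image_fill_arrays(frame->data, frame->linesize, backing->pixels, format,
                         frame->width, frame->height, 32);
    // The AVBufferRef owns |backing|; ReleaseBuffer runs once no AVFrame
    // (including the decoder's internal references) points at the data.
    frame->buf[0] =
        av_buffer_create(frame->data[0], size, ReleaseBuffer, backing, 0);
    if (!frame->buf[0]) {
      ReleaseBuffer(backing, NULL);
      return AVERROR(ENOMEM);
    }
    return 0;
  }

  // Wiring, as in FFmpegVideoDecoder::ConfigureDecoder() below:
  //   codec_context->get_buffer2 = GetBuffer2;
  //   codec_context->refcounted_frames = 1;  // removed/ignored in newer FFmpeg
  //   codec_context->opaque = this;          // lets the callback reach the decoder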
diff --git a/media/base/audio_video_metadata_extractor_unittest.cc b/media/base/audio_video_metadata_extractor_unittest.cc
index 8ab1df9..0af6e16 100644
--- a/media/base/audio_video_metadata_extractor_unittest.cc
+++ b/media/base/audio_video_metadata_extractor_unittest.cc
@@ -137,7 +137,7 @@ TEST(AudioVideoMetadataExtractorTest, AndroidRotatedMP4Video) {
       extractor->stream_infos()[0].tags.find("minor_version")->second);
 
   EXPECT_EQ("h264", extractor->stream_infos()[1].type);
-  EXPECT_EQ(4u, extractor->stream_infos()[1].tags.size());
+  EXPECT_EQ(5u, extractor->stream_infos()[1].tags.size());
   EXPECT_EQ("2014-02-11 00:39:25",
             extractor->stream_infos()[1].tags.find("creation_time")->second);
   EXPECT_EQ("VideoHandle",
diff --git a/media/base/media_file_checker.cc b/media/base/media_file_checker.cc
index 418839d..4a49ac7 100644
--- a/media/base/media_file_checker.cc
+++ b/media/base/media_file_checker.cc
@@ -84,18 +84,20 @@ bool MediaFileChecker::Start(base::TimeDelta check_time) {
       // decoded; otherwise av_free_packet() will corrupt memory.
       AVPacket temp_packet = packet;
       do {
-        avcodec_get_frame_defaults(frame.get());
         result = avcodec_decode_audio4(av_context, frame.get(), &frame_decoded,
                                        &temp_packet);
         if (result < 0)
          break;
+        av_frame_unref(frame.get());
        temp_packet.size -= result;
        temp_packet.data += result;
+        frame_decoded = 0;
      } while (temp_packet.size > 0);
    } else if (av_context->codec_type == AVMEDIA_TYPE_VIDEO) {
-      avcodec_get_frame_defaults(frame.get());
      result = avcodec_decode_video2(av_context, frame.get(), &frame_decoded,
                                     &packet);
+      if (result >= 0 && frame_decoded)
+        av_frame_unref(frame.get());
    }
    av_free_packet(&packet);
  } while (base::TimeTicks::Now() < deadline && read_ok && result >= 0);
diff --git a/media/cast/test/sender.cc b/media/cast/test/sender.cc
index 9dd3581..e457e2c 100644
--- a/media/cast/test/sender.cc
+++ b/media/cast/test/sender.cc
@@ -140,7 +140,7 @@ VideoSenderConfig GetVideoSenderConfig() {
   return video_config;
 }
 
-void AVFreeFrame(AVFrame* frame) { avcodec_free_frame(&frame); }
+void AVFreeFrame(AVFrame* frame) { av_frame_free(&frame); }
 
 class SendProcess {
  public:
@@ -533,11 +533,11 @@ class SendProcess {
     // Audio.
     AVFrame* avframe = av_frame_alloc();
 
-    // Shallow copy of the packet.
+    // Make a shallow copy of packet so we can slide packet.data as frames are
+    // decoded from the packet; otherwise av_free_packet() will corrupt memory.
     AVPacket packet_temp = *packet.get();
 
     do {
-      avcodec_get_frame_defaults(avframe);
       int frame_decoded = 0;
       int result = avcodec_decode_audio4(
           av_audio_context(), avframe, &frame_decoded, &packet_temp);
@@ -577,8 +577,9 @@ class SendProcess {
           // Note: Not all files have correct values for pkt_pts.
           base::TimeDelta::FromMilliseconds(avframe->pkt_pts));
       audio_algo_.EnqueueBuffer(buffer);
+      av_frame_unref(avframe);
     } while (packet_temp.size > 0);
-    avcodec_free_frame(&avframe);
+    av_frame_free(&avframe);
 
     const int frames_needed_to_scale =
         playback_rate_ * av_audio_context()->sample_rate /
@@ -618,15 +619,16 @@ class SendProcess {
     // Video.
     int got_picture;
     AVFrame* avframe = av_frame_alloc();
-    avcodec_get_frame_defaults(avframe);
     // Tell the decoder to reorder for us.
     avframe->reordered_opaque = av_video_context()->reordered_opaque =
         packet->pts;
     CHECK(avcodec_decode_video2(
               av_video_context(), avframe, &got_picture, packet.get()) >= 0)
         << "Video decode error.";
-    if (!got_picture)
+    if (!got_picture) {
+      av_frame_free(&avframe);
       return;
+    }
     gfx::Size size(av_video_context()->width, av_video_context()->height);
     if (!video_first_pts_set_ ||
         avframe->reordered_opaque < video_first_pts_) {
diff --git a/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc b/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc
index 012aaf5..c35b178 100644
--- a/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc
+++ b/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc
@@ -271,7 +271,7 @@ cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer(
   // skipping end of stream packets since they have a size of zero.
   do {
     // Reset frame to default values.
-    avcodec_get_frame_defaults(av_frame_.get());
+    av_frame_unref(av_frame_.get());
 
     int frame_decoded = 0;
     int result = avcodec_decode_audio4(
diff --git a/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc b/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
index 320362f..942dce1 100644
--- a/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
+++ b/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
@@ -226,7 +226,7 @@ cdm::Status FFmpegCdmVideoDecoder::DecodeFrame(
   codec_context_->reordered_opaque = timestamp;
 
   // Reset frame to default values.
-  avcodec_get_frame_defaults(av_frame_.get());
+  av_frame_unref(av_frame_.get());
 
   // This is for codecs not using get_buffer to initialize
   // |av_frame_->reordered_opaque|
diff --git a/media/ffmpeg/ffmpeg_common.h b/media/ffmpeg/ffmpeg_common.h
index c697bee..ef1a7b6 100644
--- a/media/ffmpeg/ffmpeg_common.h
+++ b/media/ffmpeg/ffmpeg_common.h
@@ -19,6 +19,14 @@
 
 // Include FFmpeg header files.
 extern "C" {
+// Disable deprecated features which result in spammy compile warnings. This
+// list of defines must mirror those in the 'defines' section of the ffmpeg.gyp
+// file or the headers below will generate different structures.
+#define FF_API_PIX_FMT_DESC 0
+#define FF_API_OLD_DECODE_AUDIO 0
+#define FF_API_DESTRUCT_PACKET 0
+#define FF_API_GET_BUFFER 0
+
 // Temporarily disable possible loss of data warning.
 // TODO(scherkus): fix and upstream the compiler warnings.
 MSVC_PUSH_DISABLE_WARNING(4244);
@@ -61,7 +69,7 @@ inline void ScopedPtrAVFreeContext::operator()(void* x) const {
 
 inline void ScopedPtrAVFreeFrame::operator()(void* x) const {
   AVFrame* frame = static_cast<AVFrame*>(x);
-  avcodec_free_frame(&frame);
+  av_frame_free(&frame);
 }
 
 // Converts an int64 timestamp in |time_base| units to a base::TimeDelta.
diff --git a/media/ffmpeg/ffmpeg_unittest.cc b/media/ffmpeg/ffmpeg_unittest.cc
index ea1d0e1..dbc28c5 100644
--- a/media/ffmpeg/ffmpeg_unittest.cc
+++ b/media/ffmpeg/ffmpeg_unittest.cc
@@ -233,7 +233,7 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
       memcpy(&packet, audio_packets_.peek(), sizeof(packet));
     }
 
-    avcodec_get_frame_defaults(audio_buffer_.get());
+    av_frame_unref(audio_buffer_.get());
     result = avcodec_decode_audio4(av_audio_context(), audio_buffer_.get(),
                                    &got_audio, &packet);
     if (!audio_packets_.empty()) {
@@ -287,7 +287,7 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
       memcpy(&packet, video_packets_.peek(), sizeof(packet));
     }
 
-    avcodec_get_frame_defaults(video_buffer_.get());
+    av_frame_unref(video_buffer_.get());
     av_video_context()->reordered_opaque = packet.pts;
     result = avcodec_decode_video2(av_video_context(), video_buffer_.get(),
                                    &got_picture, &packet);
diff --git a/media/filters/audio_file_reader.cc b/media/filters/audio_file_reader.cc
index 80f8ce6..b20fd8d 100644
--- a/media/filters/audio_file_reader.cc
+++ b/media/filters/audio_file_reader.cc
@@ -131,7 +131,9 @@ int AudioFileReader::Read(AudioBus* audio_bus) {
     // decoded from the packet; otherwise av_free_packet() will corrupt memory.
     AVPacket packet_temp = packet;
     do {
-      avcodec_get_frame_defaults(av_frame.get());
+      // Reset frame to default values.
+      av_frame_unref(av_frame.get());
+
       int frame_decoded = 0;
       int result = avcodec_decode_audio4(
           codec_context_, av_frame.get(), &frame_decoded, &packet_temp);
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index cd988ed..704eefd 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -378,7 +378,6 @@ bool FFmpegAudioDecoder::FFmpegDecode(
     DCHECK_GE(unread_frames, 0);
     if (unread_frames > 0)
       output->TrimEnd(unread_frames);
-    av_frame_unref(av_frame_.get());
   }
 
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
index 061d631..f5b4fdd 100644
--- a/media/filters/ffmpeg_demuxer.cc
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -902,15 +902,7 @@ void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
   if (!packet->data) {
     ScopedAVPacket new_packet(new AVPacket());
     av_new_packet(new_packet.get(), 0);
-
-    new_packet->pts = packet->pts;
-    new_packet->dts = packet->dts;
-    new_packet->pos = packet->pos;
-    new_packet->duration = packet->duration;
-    new_packet->convergence_duration = packet->convergence_duration;
-    new_packet->flags = packet->flags;
-    new_packet->stream_index = packet->stream_index;
-
+    av_packet_copy_props(new_packet.get(), packet.get());
     packet.swap(new_packet);
   }
 
diff --git a/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc b/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc
index 17eff7b..16dbe85 100644
--- a/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc
+++ b/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc
@@ -54,13 +54,7 @@ bool FFmpegH264ToAnnexBBitstreamConverter::ConvertPacket(AVPacket* packet) {
   // This is a bit tricky: since the interface does not allow us to replace
   // the pointer of the old packet with a new one, we will initially copy the
   // metadata from old packet to new bigger packet.
-  dest_packet.pts = packet->pts;
-  dest_packet.dts = packet->dts;
-  dest_packet.pos = packet->pos;
-  dest_packet.duration = packet->duration;
-  dest_packet.convergence_duration = packet->convergence_duration;
-  dest_packet.flags = packet->flags;
-  dest_packet.stream_index = packet->stream_index;
+  av_packet_copy_props(&dest_packet, packet);
 
   // Proceed with the conversion of the actual in-band NAL units, leave room
   // for configuration in the beginning.
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index bc2346d..aaa8a68 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -54,12 +54,25 @@ static int GetThreadCount(AVCodecID codec_id) {
   return decode_threads;
 }
 
+static int GetVideoBufferImpl(struct AVCodecContext* s,
+                              AVFrame* frame,
+                              int flags) {
+  FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque);
+  return decoder->GetVideoBuffer(s, frame, flags);
+}
+
+static void ReleaseVideoBufferImpl(void* opaque, uint8* data) {
+  scoped_refptr<VideoFrame> video_frame;
+  video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque));
+}
+
 FFmpegVideoDecoder::FFmpegVideoDecoder(
     const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
     : task_runner_(task_runner), state_(kUninitialized) {}
 
-int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
-                                       AVFrame* frame) {
+int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context,
+                                       AVFrame* frame,
+                                       int flags) {
   // Don't use |codec_context_| here! With threaded decoding,
   // it will contain unsynchronized width/height/pix_fmt values,
   // whereas |codec_context| contains the current threads's
@@ -103,36 +116,28 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
           format, coded_size, gfx::Rect(size), natural_size, kNoTimestamp());
 
   for (int i = 0; i < 3; i++) {
-    frame->base[i] = video_frame->data(i);
     frame->data[i] = video_frame->data(i);
     frame->linesize[i] = video_frame->stride(i);
   }
 
-  frame->opaque = NULL;
-  video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
-  frame->type = FF_BUFFER_TYPE_USER;
   frame->width = coded_size.width();
   frame->height = coded_size.height();
   frame->format = codec_context->pix_fmt;
-
+  frame->reordered_opaque = codec_context->reordered_opaque;
+
+  // Now create an AVBufferRef for the data just allocated. It will own the
+  // reference to the VideoFrame object.
+  void* opaque = NULL;
+  video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque));
+  frame->buf[0] =
+      av_buffer_create(frame->data[0],
+                       VideoFrame::AllocationSize(format, coded_size),
+                       ReleaseVideoBufferImpl,
+                       opaque,
+                       0);
   return 0;
 }
 
-static int GetVideoBufferImpl(AVCodecContext* s, AVFrame* frame) {
-  FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque);
-  return decoder->GetVideoBuffer(s, frame);
-}
-
-static void ReleaseVideoBufferImpl(AVCodecContext* s, AVFrame* frame) {
-  scoped_refptr<VideoFrame> video_frame;
-  video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
-
-  // The FFmpeg API expects us to zero the data pointers in
-  // this callback
-  memset(frame->data, 0, sizeof(frame->data));
-  frame->opaque = NULL;
-}
-
 void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
                                     bool low_delay,
                                     const PipelineStatusCB& status_cb) {
@@ -272,9 +277,6 @@ bool FFmpegVideoDecoder::FFmpegDecode(
     scoped_refptr<VideoFrame>* video_frame) {
   DCHECK(video_frame);
 
-  // Reset frame to default values.
-  avcodec_get_frame_defaults(av_frame_.get());
-
   // Create a packet for input data.
   // Due to FFmpeg API changes we no longer have const read-only pointers.
   AVPacket packet;
@@ -288,10 +290,6 @@ bool FFmpegVideoDecoder::FFmpegDecode(
 
     // Let FFmpeg handle presentation timestamp reordering.
     codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds();
-
-    // This is for codecs not using get_buffer to initialize
-    // |av_frame_->reordered_opaque|
-    av_frame_->reordered_opaque = codec_context_->reordered_opaque;
   }
 
   int frame_decoded = 0;
@@ -306,6 +304,11 @@ bool FFmpegVideoDecoder::FFmpegDecode(
     return false;
   }
 
+  // FFmpeg says some codecs might have multiple frames per packet. Previous
+  // discussions with rbultje@ indicate this shouldn't be true for the codecs
+  // we use.
+  DCHECK_EQ(result, packet.size);
+
   // If no frame was produced then signal that more data is required to
   // produce more frames. This can happen under two circumstances:
   //   1) Decoder was recently initialized/flushed
@@ -323,18 +326,17 @@ bool FFmpegVideoDecoder::FFmpegDecode(
       !av_frame_->data[VideoFrame::kVPlane]) {
     LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
     *video_frame = NULL;
+    av_frame_unref(av_frame_.get());
     return false;
   }
 
-  if (!av_frame_->opaque) {
-    LOG(ERROR) << "VideoFrame object associated with frame data not set.";
-    return false;
-  }
 
-  *video_frame = static_cast<VideoFrame*>(av_frame_->opaque);
+  *video_frame =
+      reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
 
   (*video_frame)->set_timestamp(
       base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
+  av_frame_unref(av_frame_.get());
 
   return true;
 }
@@ -351,15 +353,12 @@ bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) {
   codec_context_.reset(avcodec_alloc_context3(NULL));
   VideoDecoderConfigToAVCodecContext(config_, codec_context_.get());
 
-  // Enable motion vector search (potentially slow), strong deblocking filter
-  // for damaged macroblocks, and set our error detection sensitivity.
-  codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
   codec_context_->thread_count = GetThreadCount(codec_context_->codec_id);
   codec_context_->thread_type = low_delay ? FF_THREAD_SLICE : FF_THREAD_FRAME;
   codec_context_->opaque = this;
   codec_context_->flags |= CODEC_FLAG_EMU_EDGE;
-  codec_context_->get_buffer = GetVideoBufferImpl;
-  codec_context_->release_buffer = ReleaseVideoBufferImpl;
+  codec_context_->get_buffer2 = GetVideoBufferImpl;
+  codec_context_->refcounted_frames = 1;
 
   AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
   if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h
index c8481a2..ced6ddf 100644
--- a/media/filters/ffmpeg_video_decoder.h
+++ b/media/filters/ffmpeg_video_decoder.h
@@ -41,9 +41,11 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
   virtual void Stop() OVERRIDE;
 
   // Callback called from within FFmpeg to allocate a buffer based on
-  // the dimensions of |codec_context|. See AVCodecContext.get_buffer
+  // the dimensions of |codec_context|. See AVCodecContext.get_buffer2
   // documentation inside FFmpeg.
-  int GetVideoBuffer(AVCodecContext *codec_context, AVFrame* frame);
+  int GetVideoBuffer(struct AVCodecContext* codec_context,
+                     AVFrame* frame,
+                     int flags);
 
  private:
   enum DecoderState {
diff --git a/media/filters/pipeline_integration_test_base.cc b/media/filters/pipeline_integration_test_base.cc
index 899c676..c179903 100644
--- a/media/filters/pipeline_integration_test_base.cc
+++ b/media/filters/pipeline_integration_test_base.cc
@@ -215,7 +215,8 @@ PipelineIntegrationTestBase::CreateFilterCollection(
     const base::FilePath& file_path,
     Decryptor* decryptor) {
   FileDataSource* file_data_source = new FileDataSource();
-  CHECK(file_data_source->Initialize(file_path));
+  CHECK(file_data_source->Initialize(file_path)) << "Is " << file_path.value()
+                                                 << " missing?";
   data_source_.reset(file_data_source);
 
   Demuxer::NeedKeyCB need_key_cb = base::Bind(