author     dalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-06-11 22:07:35 +0000
committer  dalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-06-11 22:07:35 +0000
commit     1c9744a811af60887347e76068b6c14a9361fba5 (patch)
tree       ab948f22667f0d53f9b221ce91342654e9e0faad /media
parent     9b4e643469f80cd6b186ebb0f03b6c135c0d5478 (diff)
Switch to using avcodec_decode_audio4, avcodec_alloc_context3.
Allows us to remove another patch from FFmpeg relating to the use of
deprecated features! We're now using the latest and greatest!

FFmpeg side changes are here: https://gerrit.chromium.org/gerrit/24823

BUG=112673
TEST=ffmpeg_regression_tests, webaudio tests.

Review URL: https://chromiumcodereview.appspot.com/10540067

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@141524 0039d316-1c4b-4281-b951-d872f2087c98
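For context, the deprecated avcodec_decode_audio3() path decoded into a caller-owned buffer of AVCODEC_MAX_AUDIO_FRAME_SIZE bytes allocated with av_malloc(), whereas avcodec_decode_audio4() decodes into an AVFrame and reports output via a got_frame flag plus frame->nb_samples. The sketch below is illustrative only and is not code from this change; it assumes a 2012-era FFmpeg (libavcodec 54), a hypothetical helper name DecodeAllAudio, and a caller that has already opened |format_context| and the audio |codec_context| for |stream_index| with avcodec_open2().

// Minimal sketch of the decode pattern adopted by this change (assumptions
// noted above; not part of the tree).
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

static bool DecodeAllAudio(AVFormatContext* format_context,
                           AVCodecContext* codec_context,
                           int stream_index) {
  // The new API decodes into an AVFrame instead of a fixed-size
  // AVCODEC_MAX_AUDIO_FRAME_SIZE buffer from av_malloc().
  AVFrame* frame = avcodec_alloc_frame();
  if (!frame)
    return false;

  AVPacket packet;
  while (av_read_frame(format_context, &packet) >= 0) {
    if (packet.stream_index != stream_index) {
      av_free_packet(&packet);
      continue;
    }

    // The frame is reused across calls, so reset it to default values first.
    avcodec_get_frame_defaults(frame);

    int got_frame = 0;
    int result = avcodec_decode_audio4(codec_context, frame, &got_frame,
                                       &packet);
    av_free_packet(&packet);
    if (result < 0) {
      av_free(frame);
      return false;  // Decode error.
    }
    if (!got_frame)
      continue;  // No output yet (e.g. decoder delay); keep feeding packets.

    // The decoded byte count is now derived from nb_samples rather than
    // returned through a size out-parameter.
    int decoded_size = av_samples_get_buffer_size(
        NULL, codec_context->channels, frame->nb_samples,
        codec_context->sample_fmt, 1);
    if (decoded_size > 0) {
      // frame->data[0] holds the samples (interleaved, for packed sample
      // formats); consume them here.
    }
  }

  av_free(frame);
  return true;
}

The key behavioural difference exercised throughout the diff below is that a non-negative return value no longer implies output: callers must check got_frame and compute the byte count themselves (via av_samples_get_buffer_size()) instead of trusting a size out-parameter.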
Diffstat (limited to 'media')
-rw-r--r--  media/ffmpeg/ffmpeg_unittest.cc          | 25
-rw-r--r--  media/filters/audio_file_reader.cc       | 26
-rw-r--r--  media/filters/ffmpeg_audio_decoder.cc    | 52
-rw-r--r--  media/filters/ffmpeg_audio_decoder.h     |  8
-rw-r--r--  media/filters/ffmpeg_video_decoder.cc    |  5
-rw-r--r--  media/test/ffmpeg_tests/ffmpeg_tests.cc  | 46
-rw-r--r--  media/tools/media_bench/media_bench.cc   | 42
7 files changed, 118 insertions(+), 86 deletions(-)
diff --git a/media/ffmpeg/ffmpeg_unittest.cc b/media/ffmpeg/ffmpeg_unittest.cc
index 49be368..d18343a 100644
--- a/media/ffmpeg/ffmpeg_unittest.cc
+++ b/media/ffmpeg/ffmpeg_unittest.cc
@@ -90,8 +90,7 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
duration_(AV_NOPTS_VALUE) {
InitializeFFmpeg();
- audio_buffer_.reset(
- reinterpret_cast<int16*>(av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE)));
+ audio_buffer_.reset(avcodec_alloc_frame());
video_buffer_.reset(avcodec_alloc_frame());
}
@@ -239,7 +238,7 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
// Decode until output is produced, end of stream, or error.
while (true) {
int result = 0;
- int size_out = AVCODEC_MAX_AUDIO_FRAME_SIZE;
+ int got_audio = 0;
bool end_of_stream = false;
AVPacket packet;
@@ -250,25 +249,22 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
memcpy(&packet, audio_packets_.peek(), sizeof(packet));
}
- result = avcodec_decode_audio3(av_audio_context(), audio_buffer_.get(),
- &size_out, audio_packets_.peek());
+ avcodec_get_frame_defaults(audio_buffer_.get());
+ result = avcodec_decode_audio4(av_audio_context(), audio_buffer_.get(),
+ &got_audio, &packet);
if (!audio_packets_.empty()) {
audio_packets_.pop();
}
EXPECT_GE(result, 0) << "Audio decode error.";
- if (result < 0 || (size_out == 0 && end_of_stream)) {
+ if (result < 0 || (got_audio == 0 && end_of_stream)) {
return false;
}
if (result > 0) {
- // TODO(scherkus): move this to ffmpeg_common.h and dedup.
- int64 denominator = av_audio_context()->channels *
- av_get_bytes_per_sample(av_audio_context()->sample_fmt) *
- av_audio_context()->sample_rate;
- double microseconds = size_out /
- (denominator /
- static_cast<double>(base::Time::kMicrosecondsPerSecond));
+ double microseconds = 1.0L * audio_buffer_->nb_samples /
+ av_audio_context()->sample_rate *
+ base::Time::kMicrosecondsPerSecond;
decoded_audio_duration_ = static_cast<int64>(microseconds);
if (packet.pts == static_cast<int64>(AV_NOPTS_VALUE)) {
@@ -307,6 +303,7 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
memcpy(&packet, video_packets_.peek(), sizeof(packet));
}
+ avcodec_get_frame_defaults(video_buffer_.get());
av_video_context()->reordered_opaque = packet.pts;
result = avcodec_decode_video2(av_video_context(), video_buffer_.get(),
&got_picture, &packet);
@@ -407,7 +404,7 @@ class FFmpegTest : public testing::TestWithParam<const char*> {
AVPacketQueue audio_packets_;
AVPacketQueue video_packets_;
- scoped_ptr_malloc<int16, media::ScopedPtrAVFree> audio_buffer_;
+ scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> audio_buffer_;
scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> video_buffer_;
int64 decoded_audio_time_;
diff --git a/media/filters/audio_file_reader.cc b/media/filters/audio_file_reader.cc
index 974304c..7dc4b94 100644
--- a/media/filters/audio_file_reader.cc
+++ b/media/filters/audio_file_reader.cc
@@ -124,22 +124,21 @@ bool AudioFileReader::Read(const std::vector<float*>& audio_data,
return false;
}
- scoped_ptr_malloc<int16, ScopedPtrAVFree> output_buffer(
- static_cast<int16*>(av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE)));
+ // Holds decoded audio.
+ scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> av_frame(avcodec_alloc_frame());
// Read until we hit EOF or we've read the requested number of frames.
- AVPacket avpkt;
+ AVPacket packet;
int result = 0;
size_t current_frame = 0;
while (current_frame < number_of_frames &&
- (result = av_read_frame(format_context_, &avpkt)) >= 0) {
- int out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
- result = avcodec_decode_audio3(codec_context_,
- output_buffer.get(),
- &out_size,
- &avpkt);
- av_free_packet(&avpkt);
+ (result = av_read_frame(format_context_, &packet)) >= 0) {
+ avcodec_get_frame_defaults(av_frame.get());
+ int frame_decoded = 0;
+ int result = avcodec_decode_audio4(
+ codec_context_, av_frame.get(), &frame_decoded, &packet);
+ av_free_packet(&packet);
if (result < 0) {
DLOG(WARNING)
@@ -150,10 +149,13 @@ bool AudioFileReader::Read(const std::vector<float*>& audio_data,
return current_frame > 0;
}
+ if (!frame_decoded)
+ continue;
+
// Determine the number of sample-frames we just decoded.
size_t bytes_per_sample =
av_get_bytes_per_sample(codec_context_->sample_fmt);
- size_t frames_read = out_size / (channels * bytes_per_sample);
+ size_t frames_read = av_frame->nb_samples;
// Truncate, if necessary, if the destination isn't big enough.
if (current_frame + frames_read > number_of_frames)
@@ -163,7 +165,7 @@ bool AudioFileReader::Read(const std::vector<float*>& audio_data,
// with nominal range -1.0 -> +1.0.
for (size_t channel_index = 0; channel_index < channels;
++channel_index) {
- if (!DeinterleaveAudioChannel(output_buffer.get(),
+ if (!DeinterleaveAudioChannel(av_frame->data[0],
audio_data[channel_index] + current_frame,
channels,
channel_index,
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index be750bf..f48c0fa 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -14,21 +14,9 @@
namespace media {
-// Returns true if the decode result was an error.
-static bool IsErrorResult(int result, int decoded_size) {
- return result < 0 ||
- decoded_size < 0 ||
- decoded_size > AVCODEC_MAX_AUDIO_FRAME_SIZE;
-}
-
-// Returns true if the decode result produced audio samples.
-static bool ProducedAudioSamples(int decoded_size) {
- return decoded_size > 0;
-}
-
// Returns true if the decode result was a timestamp packet and not actual audio
// data.
-static bool IsTimestampMarkerPacket(int result, Buffer* input) {
+static inline bool IsTimestampMarkerPacket(int result, Buffer* input) {
// We can get a positive result but no decoded data. This is ok because this
// this can be a marker packet that only contains timestamp.
return result > 0 && !input->IsEndOfStream() &&
@@ -37,7 +25,7 @@ static bool IsTimestampMarkerPacket(int result, Buffer* input) {
}
// Returns true if the decode result was end of stream.
-static bool IsEndOfStream(int result, int decoded_size, Buffer* input) {
+static inline bool IsEndOfStream(int result, int decoded_size, Buffer* input) {
// Three conditions to meet to declare end of stream for this decoder:
// 1. FFmpeg didn't read anything.
// 2. FFmpeg didn't output anything.
@@ -54,8 +42,7 @@ FFmpegAudioDecoder::FFmpegAudioDecoder(
bits_per_channel_(0),
channel_layout_(CHANNEL_LAYOUT_NONE),
samples_per_second_(0),
- decoded_audio_size_(AVCODEC_MAX_AUDIO_FRAME_SIZE),
- decoded_audio_(static_cast<uint8*>(av_malloc(decoded_audio_size_))) {
+ av_frame_(NULL) {
}
void FFmpegAudioDecoder::Initialize(
@@ -101,8 +88,6 @@ void FFmpegAudioDecoder::Reset(const base::Closure& closure) {
}
FFmpegAudioDecoder::~FFmpegAudioDecoder() {
- av_free(decoded_audio_);
-
// TODO(scherkus): should we require Stop() to be called? this might end up
// getting called on a random thread due to refcounting.
if (codec_context_) {
@@ -110,6 +95,11 @@ FFmpegAudioDecoder::~FFmpegAudioDecoder() {
avcodec_close(codec_context_);
av_free(codec_context_);
}
+
+ if (av_frame_) {
+ av_free(av_frame_);
+ av_frame_ = NULL;
+ }
}
void FFmpegAudioDecoder::DoInitialize(
@@ -134,7 +124,7 @@ void FFmpegAudioDecoder::DoInitialize(
}
// Initialize AVCodecContext structure.
- codec_context_ = avcodec_alloc_context();
+ codec_context_ = avcodec_alloc_context3(NULL);
AudioDecoderConfigToAVCodecContext(config, codec_context_);
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
@@ -147,6 +137,7 @@ void FFmpegAudioDecoder::DoInitialize(
}
// Success!
+ av_frame_ = avcodec_alloc_frame();
bits_per_channel_ = config.bits_per_channel();
channel_layout_ = config.channel_layout();
samples_per_second_ = config.samples_per_second();
@@ -198,12 +189,14 @@ void FFmpegAudioDecoder::DoDecodeBuffer(
PipelineStatistics statistics;
statistics.audio_bytes_decoded = input->GetDataSize();
- int decoded_audio_size = decoded_audio_size_;
- int result = avcodec_decode_audio3(
- codec_context_, reinterpret_cast<int16_t*>(decoded_audio_),
- &decoded_audio_size, &packet);
+ // Reset frame to default values.
+ avcodec_get_frame_defaults(av_frame_);
+
+ int frame_decoded = 0;
+ int result = avcodec_decode_audio4(
+ codec_context_, av_frame_, &frame_decoded, &packet);
- if (IsErrorResult(result, decoded_audio_size)) {
+ if (result < 0) {
DCHECK(!input->IsEndOfStream())
<< "End of stream buffer produced an error! "
<< "This is quite possibly a bug in the audio decoder not handling "
@@ -218,14 +211,21 @@ void FFmpegAudioDecoder::DoDecodeBuffer(
return;
}
+ int decoded_audio_size = 0;
+ if (frame_decoded) {
+ decoded_audio_size = av_samples_get_buffer_size(
+ NULL, codec_context_->channels, av_frame_->nb_samples,
+ codec_context_->sample_fmt, 1);
+ }
+
scoped_refptr<DataBuffer> output;
- if (ProducedAudioSamples(decoded_audio_size)) {
+ if (decoded_audio_size > 0) {
// Copy the audio samples into an output buffer.
output = new DataBuffer(decoded_audio_size);
output->SetDataSize(decoded_audio_size);
uint8* data = output->GetWritableData();
- memcpy(data, decoded_audio_, decoded_audio_size);
+ memcpy(data, av_frame_->data[0], decoded_audio_size);
UpdateDurationAndTimestamp(input, output);
} else if (IsTimestampMarkerPacket(result, input)) {
diff --git a/media/filters/ffmpeg_audio_decoder.h b/media/filters/ffmpeg_audio_decoder.h
index 5dbe895..bf3b1e4 100644
--- a/media/filters/ffmpeg_audio_decoder.h
+++ b/media/filters/ffmpeg_audio_decoder.h
@@ -12,6 +12,7 @@
#include "media/base/audio_decoder.h"
struct AVCodecContext;
+struct AVFrame;
namespace media {
@@ -74,11 +75,8 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
base::TimeDelta estimated_next_timestamp_;
- // Holds decoded audio. As required by FFmpeg, input/output buffers should
- // be allocated with suitable padding and alignment. av_malloc() provides
- // us that guarantee.
- const int decoded_audio_size_;
- uint8* decoded_audio_; // Allocated via av_malloc().
+ // Holds decoded audio.
+ AVFrame* av_frame_;
ReadCB read_cb_;
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 81ddd8b..41bd0e4 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -96,7 +96,7 @@ void FFmpegVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream,
}
// Initialize AVCodecContext structure.
- codec_context_ = avcodec_alloc_context();
+ codec_context_ = avcodec_alloc_context3(NULL);
VideoDecoderConfigToAVCodecContext(config, codec_context_);
// Enable motion vector search (potentially slow), strong deblocking filter
@@ -323,6 +323,9 @@ bool FFmpegVideoDecoder::Decode(
// Let FFmpeg handle presentation timestamp reordering.
codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds();
+ // Reset frame to default values.
+ avcodec_get_frame_defaults(av_frame_);
+
// This is for codecs not using get_buffer to initialize
// |av_frame_->reordered_opaque|
av_frame_->reordered_opaque = codec_context_->reordered_opaque;
diff --git a/media/test/ffmpeg_tests/ffmpeg_tests.cc b/media/test/ffmpeg_tests/ffmpeg_tests.cc
index 557925f..fb62b64 100644
--- a/media/test/ffmpeg_tests/ffmpeg_tests.cc
+++ b/media/test/ffmpeg_tests/ffmpeg_tests.cc
@@ -238,13 +238,18 @@ int main(int argc, const char** argv) {
}
// Buffer used for audio decoding.
- scoped_ptr_malloc<int16, media::ScopedPtrAVFree> samples(
- reinterpret_cast<int16*>(av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE)));
+ scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> audio_frame(
+ avcodec_alloc_frame());
+ if (!audio_frame.get()) {
+ std::cerr << "Error: avcodec_alloc_frame for "
+ << in_path.value() << std::endl;
+ return 1;
+ }
// Buffer used for video decoding.
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> frame(
+ scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> video_frame(
avcodec_alloc_frame());
- if (!frame.get()) {
+ if (!video_frame.get()) {
std::cerr << "Error: avcodec_alloc_frame for "
<< in_path.value() << std::endl;
return 1;
@@ -284,20 +289,29 @@ int main(int argc, const char** argv) {
if (packet.stream_index == target_stream) {
int result = -1;
if (target_codec == AVMEDIA_TYPE_AUDIO) {
- int size_out = AVCODEC_MAX_AUDIO_FRAME_SIZE;
+ int size_out = 0;
+ int got_audio = 0;
+
+ avcodec_get_frame_defaults(audio_frame.get());
base::TimeTicks decode_start = base::TimeTicks::HighResNow();
- result = avcodec_decode_audio3(codec_context, samples.get(), &size_out,
- &packet);
+ result = avcodec_decode_audio4(codec_context, audio_frame.get(),
+ &got_audio, &packet);
base::TimeDelta delta = base::TimeTicks::HighResNow() - decode_start;
- if (size_out) {
+ if (got_audio) {
+ size_out = av_samples_get_buffer_size(
+ NULL, codec_context->channels, audio_frame->nb_samples,
+ codec_context->sample_fmt, 1);
+ }
+
+ if (got_audio && size_out) {
decode_times.push_back(delta.InMillisecondsF());
++frames;
read_result = 0; // Force continuation.
if (output) {
- if (fwrite(samples.get(), 1, size_out, output) !=
+ if (fwrite(audio_frame->data[0], 1, size_out, output) !=
static_cast<size_t>(size_out)) {
std::cerr << "Error: Could not write "
<< size_out << " bytes for " << in_path.value()
@@ -307,22 +321,24 @@ int main(int argc, const char** argv) {
}
const uint8* u8_samples =
- reinterpret_cast<const uint8*>(samples.get());
+ reinterpret_cast<const uint8*>(audio_frame->data[0]);
if (hash_djb2) {
hash_value = DJB2Hash(u8_samples, size_out, hash_value);
}
if (hash_md5) {
base::MD5Update(
&ctx,
- base::StringPiece(
- reinterpret_cast<const char*>(u8_samples), size_out));
+ base::StringPiece(reinterpret_cast<const char*>(u8_samples),
+ size_out));
}
}
} else if (target_codec == AVMEDIA_TYPE_VIDEO) {
int got_picture = 0;
+ avcodec_get_frame_defaults(video_frame.get());
+
base::TimeTicks decode_start = base::TimeTicks::HighResNow();
- result = avcodec_decode_video2(codec_context, frame.get(),
+ result = avcodec_decode_video2(codec_context, video_frame.get(),
&got_picture, &packet);
base::TimeDelta delta = base::TimeTicks::HighResNow() - decode_start;
@@ -332,8 +348,8 @@ int main(int argc, const char** argv) {
read_result = 0; // Force continuation.
for (int plane = 0; plane < 3; ++plane) {
- const uint8* source = frame->data[plane];
- const size_t source_stride = frame->linesize[plane];
+ const uint8* source = video_frame->data[plane];
+ const size_t source_stride = video_frame->linesize[plane];
size_t bytes_per_line = codec_context->width;
size_t copy_lines = codec_context->height;
if (plane != 0) {
diff --git a/media/tools/media_bench/media_bench.cc b/media/tools/media_bench/media_bench.cc
index 532aa7d..abba5f0 100644
--- a/media/tools/media_bench/media_bench.cc
+++ b/media/tools/media_bench/media_bench.cc
@@ -357,13 +357,18 @@ int main(int argc, const char** argv) {
}
// Buffer used for audio decoding.
- scoped_ptr_malloc<int16, media::ScopedPtrAVFree> samples(
- reinterpret_cast<int16*>(av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE)));
+ scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> audio_frame(
+ avcodec_alloc_frame());
+ if (!audio_frame.get()) {
+ std::cerr << "Error: avcodec_alloc_frame for "
+ << in_path.value() << std::endl;
+ return 1;
+ }
// Buffer used for video decoding.
- scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> frame(
+ scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> video_frame(
avcodec_alloc_frame());
- if (!frame.get()) {
+ if (!video_frame.get()) {
std::cerr << "Error: avcodec_alloc_frame for "
<< in_path.value() << std::endl;
return 1;
@@ -405,20 +410,29 @@ int main(int argc, const char** argv) {
if (packet.stream_index == target_stream) {
int result = -1;
if (target_codec == AVMEDIA_TYPE_AUDIO) {
- int size_out = AVCODEC_MAX_AUDIO_FRAME_SIZE;
+ int size_out = 0;
+ int got_audio = 0;
+
+ avcodec_get_frame_defaults(audio_frame.get());
base::TimeTicks decode_start = base::TimeTicks::HighResNow();
- result = avcodec_decode_audio3(codec_context, samples.get(), &size_out,
- &packet);
+ result = avcodec_decode_audio4(codec_context, audio_frame.get(),
+ &got_audio, &packet);
base::TimeDelta delta = base::TimeTicks::HighResNow() - decode_start;
- if (size_out) {
+ if (got_audio) {
+ size_out = av_samples_get_buffer_size(
+ NULL, codec_context->channels, audio_frame->nb_samples,
+ codec_context->sample_fmt, 1);
+ }
+
+ if (got_audio && size_out) {
decode_times.push_back(delta.InMillisecondsF());
++frames;
read_result = 0; // Force continuation.
if (output) {
- if (fwrite(samples.get(), 1, size_out, output) !=
+ if (fwrite(audio_frame->data[0], 1, size_out, output) !=
static_cast<size_t>(size_out)) {
std::cerr << "Error: Could not write "
<< size_out << " bytes for " << in_path.value()
@@ -428,7 +442,7 @@ int main(int argc, const char** argv) {
}
const uint8* u8_samples =
- reinterpret_cast<const uint8*>(samples.get());
+ reinterpret_cast<const uint8*>(audio_frame->data[0]);
if (hash_djb2) {
hash_value = DJB2Hash(u8_samples, size_out, hash_value);
}
@@ -442,8 +456,10 @@ int main(int argc, const char** argv) {
} else if (target_codec == AVMEDIA_TYPE_VIDEO) {
int got_picture = 0;
+ avcodec_get_frame_defaults(video_frame.get());
+
base::TimeTicks decode_start = base::TimeTicks::HighResNow();
- result = avcodec_decode_video2(codec_context, frame.get(),
+ result = avcodec_decode_video2(codec_context, video_frame.get(),
&got_picture, &packet);
base::TimeDelta delta = base::TimeTicks::HighResNow() - decode_start;
@@ -453,8 +469,8 @@ int main(int argc, const char** argv) {
read_result = 0; // Force continuation.
for (int plane = 0; plane < 3; ++plane) {
- const uint8* source = frame->data[plane];
- const size_t source_stride = frame->linesize[plane];
+ const uint8* source = video_frame->data[plane];
+ const size_t source_stride = video_frame->linesize[plane];
size_t bytes_per_line = codec_context->width;
size_t copy_lines = codec_context->height;
if (plane != 0) {