author    jrummell@chromium.org <jrummell@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2013-08-16 01:31:25 +0000
committer jrummell@chromium.org <jrummell@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2013-08-16 01:31:25 +0000
commit    67afc63ec87612be374e5486cc03f51c1fb18eb6 (patch)
tree      310cd24c11bfe2cfc6abea6ad698755f91cd4927 /media
parent    0275a5528dd5dde0db50f588e88bc2b200f60c21 (diff)
Fix to make sure FFmpeg and AudioBuffer use the same alignment.
Also adding a test to verify that FFmpeg and Chrome use the same sample size
for all supported formats.

BUG=272550
TEST=media_unittests all pass, played various audio files in the browser
(mono, stereo, 5.1).

Review URL: https://chromiumcodereview.appspot.com/22959004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@217907 0039d316-1c4b-4281-b951-d872f2087c98
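The substance of the change: AudioBuffer's channel planes must be allocated
with the same alignment FFmpeg assumes when it computes buffer sizes,
otherwise the two disagree about how many bytes a decoded frame occupies.
A minimal standalone sketch (not part of this patch; it uses only the public
FFmpeg API) of querying the padded size with the 32-byte alignment that this
patch exposes as AudioBuffer::kChannelAlignment:

extern "C" {
#include <libavutil/samplefmt.h>
}

// Returns the number of bytes FFmpeg needs for |frames| samples across
// |channels| channels when each channel plane is padded to a 32-byte
// boundary (the value this patch names AudioBuffer::kChannelAlignment).
int PaddedBufferSize(int channels, int frames, AVSampleFormat format) {
  int linesize = 0;  // Receives the per-plane size, padding included.
  return av_samples_get_buffer_size(&linesize, channels, frames, format, 32);
}

Because of the per-plane padding, the result can exceed channels * frames *
bytes_per_sample, which is why the decoder change below recomputes
frames_required from the returned size.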
Diffstat (limited to 'media')
-rw-r--r--  media/base/audio_buffer.cc             |  7
-rw-r--r--  media/base/audio_buffer.h              |  9
-rw-r--r--  media/ffmpeg/ffmpeg_common.h           |  3
-rw-r--r--  media/ffmpeg/ffmpeg_common_unittest.cc | 18
-rw-r--r--  media/filters/ffmpeg_audio_decoder.cc  | 36
5 files changed, 54 insertions(+), 19 deletions(-)
diff --git a/media/base/audio_buffer.cc b/media/base/audio_buffer.cc
index b2cdd8c..0bf3720 100644
--- a/media/base/audio_buffer.cc
+++ b/media/base/audio_buffer.cc
@@ -11,11 +11,6 @@
namespace media {
-// Alignment of each channel's data; this must match what ffmpeg expects
-// (which may be 0, 16, or 32, depending on the processor). Selecting 32 in
-// order to work on all processors.
-enum { kChannelAlignment = 32 };
-
AudioBuffer::AudioBuffer(SampleFormat sample_format,
int channel_count,
int frame_count,
@@ -73,6 +68,8 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
data_size *= channel_count;
data_.reset(
static_cast<uint8*>(base::AlignedAlloc(data_size, kChannelAlignment)));
+ channel_data_.reserve(1);
+ channel_data_.push_back(data_.get());
if (data)
memcpy(data_.get(), data[0], data_size);
}
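The hunk above handles the interleaved case, where a single pointer into the
allocation is recorded in channel_data_. For planar formats the same idea
yields one pointer per channel. A rough sketch of that derivation
(simplified; not the patch's actual planar branch, and BuildChannelPointers
is a hypothetical helper):

#include <cstddef>
#include <cstdint>
#include <vector>

// Derives per-channel plane pointers from a single aligned allocation.
// Assumes |base| came from an aligned allocator such as base::AlignedAlloc
// and |bytes_per_plane| is already a multiple of the 32-byte channel
// alignment, so every plane starts on an aligned boundary.
std::vector<uint8_t*> BuildChannelPointers(uint8_t* base,
                                           int channel_count,
                                           size_t bytes_per_plane) {
  std::vector<uint8_t*> channels;
  channels.reserve(channel_count);
  for (int i = 0; i < channel_count; ++i)
    channels.push_back(base + i * bytes_per_plane);
  return channels;
}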
diff --git a/media/base/audio_buffer.h b/media/base/audio_buffer.h
index e52355a..c3bcf4d 100644
--- a/media/base/audio_buffer.h
+++ b/media/base/audio_buffer.h
@@ -23,6 +23,11 @@ class AudioBus;
class MEDIA_EXPORT AudioBuffer
: public base::RefCountedThreadSafe<AudioBuffer> {
public:
+ // Alignment of each channel's data; this must match what ffmpeg expects
+ // (which may be 0, 16, or 32, depending on the processor). Selecting 32 in
+ // order to work on all processors.
+ enum { kChannelAlignment = 32 };
+
// Create an AudioBuffer whose channel data is copied from |data|. For
// interleaved data, only the first buffer is used. For planar data, the
// number of buffers must be equal to |channel_count|. |frame_count| is the
@@ -95,8 +100,8 @@ class MEDIA_EXPORT AudioBuffer
bool end_of_stream() const { return end_of_stream_; }
// Access to the raw buffer for ffmpeg to write directly to. Data for planar
- // data is grouped by channel.
- uint8* writable_data() { return data_.get(); }
+ // data is grouped by channel. There is only 1 entry for interleaved formats.
+ const std::vector<uint8*>& channel_data() const { return channel_data_; }
private:
friend class base::RefCountedThreadSafe<AudioBuffer>;
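One way a caller might exercise the new accessor (illustrative only;
ChannelsAreAligned is a hypothetical helper, assuming Chromium's
scoped_refptr and the AudioBuffer declared above):

#include <cstdint>

// Returns true if every channel plane in |buffer| starts on the boundary
// FFmpeg expects. Interleaved buffers contribute a single plane.
bool ChannelsAreAligned(const scoped_refptr<AudioBuffer>& buffer) {
  for (size_t i = 0; i < buffer->channel_data().size(); ++i) {
    uintptr_t plane = reinterpret_cast<uintptr_t>(buffer->channel_data()[i]);
    if (plane % AudioBuffer::kChannelAlignment != 0)
      return false;
  }
  return true;
}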
diff --git a/media/ffmpeg/ffmpeg_common.h b/media/ffmpeg/ffmpeg_common.h
index 99e1cc2..ccd2aa5 100644
--- a/media/ffmpeg/ffmpeg_common.h
+++ b/media/ffmpeg/ffmpeg_common.h
@@ -95,7 +95,8 @@ ChannelLayout ChannelLayoutToChromeChannelLayout(int64_t layout,
int channels);
// Converts FFmpeg's audio sample format to Chrome's SampleFormat.
-SampleFormat AVSampleFormatToSampleFormat(AVSampleFormat sample_format);
+MEDIA_EXPORT SampleFormat
+ AVSampleFormatToSampleFormat(AVSampleFormat sample_format);
// Converts FFmpeg's pixel formats to its corresponding supported video format.
VideoFrame::Format PixelFormatToVideoFormat(PixelFormat pixel_format);
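Adding MEDIA_EXPORT here is what lets the new unit test below link against
AVSampleFormatToSampleFormat() when media is built as a component. A rough
illustration of the mechanism (simplified; the real macro also handles the
import side and lives in media/base/media_export.h):

#if defined(WIN32)
#define MEDIA_EXPORT __declspec(dllexport)
#else
#define MEDIA_EXPORT __attribute__((visibility("default")))
#endif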
diff --git a/media/ffmpeg/ffmpeg_common_unittest.cc b/media/ffmpeg/ffmpeg_common_unittest.cc
index 33ad46e..2fa61ac 100644
--- a/media/ffmpeg/ffmpeg_common_unittest.cc
+++ b/media/ffmpeg/ffmpeg_common_unittest.cc
@@ -79,4 +79,22 @@ TEST_F(FFmpegCommonTest, TimeBaseConversions) {
}
}
+TEST_F(FFmpegCommonTest, VerifyFormatSizes) {
+ for (AVSampleFormat format = AV_SAMPLE_FMT_NONE;
+ format < AV_SAMPLE_FMT_NB;
+ format = static_cast<AVSampleFormat>(format + 1)) {
+ SampleFormat sample_format = AVSampleFormatToSampleFormat(format);
+ if (sample_format == kUnknownSampleFormat) {
+ // This format is not supported, so skip it.
+ continue;
+ }
+
+ // Have FFmpeg compute the size of a buffer of 1 channel / 1 frame
+ // with 1 byte alignment to make sure the sizes match.
+ int single_buffer_size = av_samples_get_buffer_size(NULL, 1, 1, format, 1);
+ int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
+ EXPECT_EQ(bytes_per_channel, single_buffer_size);
+ }
+}
+
} // namespace media
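The alignment argument of 1 means "no padding", so with one channel and one
frame av_samples_get_buffer_size() returns exactly the width of a single
sample. For instance (values follow from the format definitions; this
snippet is not part of the patch):

// AV_SAMPLE_FMT_S16 is a 16-bit format, so this returns 2 ...
int s16_size = av_samples_get_buffer_size(NULL, 1, 1, AV_SAMPLE_FMT_S16, 1);
// ... which the test expects to equal
// SampleFormatToBytesPerChannel(kSampleFormatS16), also 2.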
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index 34fc793..1889e38 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -176,24 +176,38 @@ int FFmpegAudioDecoder::GetAudioBuffer(AVCodecContext* codec,
return AVERROR(EINVAL);
// Determine how big the buffer should be and allocate it. FFmpeg may adjust
- // how big each channel data is in order to meet it's alignment policy, so
+ // how big each channel data is in order to meet the alignment policy, so
// we need to take this into consideration.
int buffer_size_in_bytes =
- av_samples_get_buffer_size(NULL, channels, frame->nb_samples, format, 1);
+ av_samples_get_buffer_size(&frame->linesize[0],
+ channels,
+ frame->nb_samples,
+ format,
+ AudioBuffer::kChannelAlignment);
int frames_required = buffer_size_in_bytes / bytes_per_channel / channels;
DCHECK_GE(frames_required, frame->nb_samples);
scoped_refptr<AudioBuffer> buffer =
AudioBuffer::CreateBuffer(sample_format, channels, frames_required);
- // Initialize the data[], linesize[], and extended_data[] fields.
- int ret = avcodec_fill_audio_frame(frame,
- channels,
- format,
- buffer->writable_data(),
- buffer_size_in_bytes,
- 1);
- if (ret < 0)
- return ret;
+ // Initialize the data[] and extended_data[] fields to point into the memory
+ // allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved
+ // audio and equal to |channels| for planar audio.
+ int number_of_planes = buffer->channel_data().size();
+ if (number_of_planes <= AV_NUM_DATA_POINTERS) {
+ DCHECK_EQ(frame->extended_data, frame->data);
+ for (int i = 0; i < number_of_planes; ++i)
+ frame->data[i] = buffer->channel_data()[i];
+ } else {
+ // There are more channels than can fit into data[], so allocate
+ // extended_data[] and fill appropriately.
+ frame->extended_data = static_cast<uint8**>(
+ av_malloc(number_of_planes * sizeof(*frame->extended_data)));
+ int i = 0;
+ for (; i < AV_NUM_DATA_POINTERS; ++i)
+ frame->extended_data[i] = frame->data[i] = buffer->channel_data()[i];
+ for (; i < number_of_planes; ++i)
+ frame->extended_data[i] = buffer->channel_data()[i];
+ }
// Now create an AVBufferRef for the data just allocated. It will own the
// reference to the AudioBuffer object.