-rw-r--r--  content/renderer/media/webmediaplayer_impl.cc | 12
-rw-r--r--  media/base/audio_buffer_converter.cc | 245
-rw-r--r--  media/base/audio_buffer_converter.h | 101
-rw-r--r--  media/base/audio_buffer_converter_unittest.cc | 186
-rw-r--r--  media/base/audio_converter.cc | 15
-rw-r--r--  media/base/audio_decoder.h | 5
-rw-r--r--  media/base/mock_filters.h | 3
-rw-r--r--  media/filters/audio_renderer_impl.cc | 71
-rw-r--r--  media/filters/audio_renderer_impl.h | 17
-rw-r--r--  media/filters/audio_renderer_impl_unittest.cc | 53
-rw-r--r--  media/filters/decoder_stream.cc | 4
-rw-r--r--  media/filters/decoder_stream.h | 10
-rw-r--r--  media/filters/decrypting_audio_decoder.cc | 28
-rw-r--r--  media/filters/decrypting_audio_decoder.h | 12
-rw-r--r--  media/filters/decrypting_audio_decoder_unittest.cc | 9
-rw-r--r--  media/filters/ffmpeg_audio_decoder.cc | 68
-rw-r--r--  media/filters/ffmpeg_audio_decoder.h | 12
-rw-r--r--  media/filters/ffmpeg_audio_decoder_unittest.cc | 3
-rw-r--r--  media/filters/opus_audio_decoder.cc | 53
-rw-r--r--  media/filters/opus_audio_decoder.h | 9
-rw-r--r--  media/filters/pipeline_integration_test_base.cc | 17
-rw-r--r--  media/filters/pipeline_integration_test_base.h | 2
-rw-r--r--  media/filters/source_buffer_stream.cc | 15
-rw-r--r--  media/media.gyp | 3
-rw-r--r--  media/tools/player_x11/player_x11.cc | 21
25 files changed, 706 insertions, 268 deletions
diff --git a/content/renderer/media/webmediaplayer_impl.cc b/content/renderer/media/webmediaplayer_impl.cc
index b31c524d..7af0999 100644
--- a/content/renderer/media/webmediaplayer_impl.cc
+++ b/content/renderer/media/webmediaplayer_impl.cc
@@ -40,6 +40,7 @@
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/audio/null_audio_sink.h"
+#include "media/base/audio_hardware_config.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/filter_collection.h"
#include "media/base/limits.h"
@@ -1165,11 +1166,12 @@ void WebMediaPlayerImpl::StartPipeline() {
audio_decoders.push_back(new media::FFmpegAudioDecoder(media_loop_));
audio_decoders.push_back(new media::OpusAudioDecoder(media_loop_));
- scoped_ptr<media::AudioRenderer> audio_renderer(
- new media::AudioRendererImpl(media_loop_,
- audio_source_provider_.get(),
- audio_decoders.Pass(),
- set_decryptor_ready_cb));
+ scoped_ptr<media::AudioRenderer> audio_renderer(new media::AudioRendererImpl(
+ media_loop_,
+ audio_source_provider_.get(),
+ audio_decoders.Pass(),
+ set_decryptor_ready_cb,
+ RenderThreadImpl::current()->GetAudioHardwareConfig()));
filter_collection->SetAudioRenderer(audio_renderer.Pass());
// Create our video decoders and renderer.
diff --git a/media/base/audio_buffer_converter.cc b/media/base/audio_buffer_converter.cc
new file mode 100644
index 0000000..fd4ef43
--- /dev/null
+++ b/media/base/audio_buffer_converter.cc
@@ -0,0 +1,245 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_buffer_converter.h"
+
+#include <cmath>
+
+#include "base/logging.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_decoder_config.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/buffers.h"
+#include "media/base/sinc_resampler.h"
+#include "media/base/vector_math.h"
+
+namespace media {
+
+// Is the config presented by |buffer| a config change from |params|?
+static bool IsConfigChange(const AudioParameters& params,
+ const scoped_refptr<AudioBuffer>& buffer) {
+ return buffer->sample_rate() != params.sample_rate() ||
+ buffer->channel_count() != params.channels() ||
+ buffer->channel_layout() != params.channel_layout();
+}
+
+AudioBufferConverter::AudioBufferConverter(const AudioParameters& output_params)
+ : output_params_(output_params),
+ input_params_(output_params),
+ last_input_buffer_offset_(0),
+ input_frames_(0),
+ buffered_input_frames_(0.0),
+ io_sample_rate_ratio_(1.0),
+ timestamp_helper_(output_params_.sample_rate()),
+ is_flushing_(false) {}
+
+AudioBufferConverter::~AudioBufferConverter() {}
+
+void AudioBufferConverter::AddInput(const scoped_refptr<AudioBuffer>& buffer) {
+ // On EOS flush any remaining buffered data.
+ if (buffer->end_of_stream()) {
+ Flush();
+ queued_outputs_.push_back(buffer);
+ return;
+ }
+
+ // We'll need a new |audio_converter_| if there was a config change.
+ if (IsConfigChange(input_params_, buffer))
+ ResetConverter(buffer);
+
+ // Pass straight through if there's no work to be done.
+ if (!audio_converter_) {
+ queued_outputs_.push_back(buffer);
+ return;
+ }
+
+ if (timestamp_helper_.base_timestamp() == kNoTimestamp())
+ timestamp_helper_.SetBaseTimestamp(buffer->timestamp());
+
+ queued_inputs_.push_back(buffer);
+ input_frames_ += buffer->frame_count();
+
+ ConvertIfPossible();
+}
+
+bool AudioBufferConverter::HasNextBuffer() { return !queued_outputs_.empty(); }
+
+scoped_refptr<AudioBuffer> AudioBufferConverter::GetNextBuffer() {
+ DCHECK(!queued_outputs_.empty());
+ scoped_refptr<AudioBuffer> out = queued_outputs_.front();
+ queued_outputs_.pop_front();
+ return out;
+}
+
+void AudioBufferConverter::Reset() {
+ audio_converter_.reset();
+ queued_inputs_.clear();
+ queued_outputs_.clear();
+ timestamp_helper_.SetBaseTimestamp(kNoTimestamp());
+ input_params_ = output_params_;
+ input_frames_ = 0;
+ buffered_input_frames_ = 0.0;
+ last_input_buffer_offset_ = 0;
+}
+
+void AudioBufferConverter::ResetTimestampState() {
+ Flush();
+ timestamp_helper_.SetBaseTimestamp(kNoTimestamp());
+}
+
+double AudioBufferConverter::ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) {
+ DCHECK(is_flushing_ || input_frames_ >= audio_bus->frames());
+
+ int requested_frames_left = audio_bus->frames();
+ int dest_index = 0;
+
+ while (requested_frames_left > 0 && !queued_inputs_.empty()) {
+ scoped_refptr<AudioBuffer> input_buffer = queued_inputs_.front();
+
+ int frames_to_read =
+ std::min(requested_frames_left,
+ input_buffer->frame_count() - last_input_buffer_offset_);
+ input_buffer->ReadFrames(
+ frames_to_read, last_input_buffer_offset_, dest_index, audio_bus);
+ last_input_buffer_offset_ += frames_to_read;
+
+ if (last_input_buffer_offset_ == input_buffer->frame_count()) {
+ // We've consumed all the frames in |input_buffer|.
+ queued_inputs_.pop_front();
+ last_input_buffer_offset_ = 0;
+ }
+
+ requested_frames_left -= frames_to_read;
+ dest_index += frames_to_read;
+ }
+
+ // If we're flushing, zero any extra space; otherwise we should always have
+ // enough data to completely fulfill the request.
+ if (is_flushing_ && requested_frames_left > 0) {
+ audio_bus->ZeroFramesPartial(audio_bus->frames() - requested_frames_left,
+ requested_frames_left);
+ } else {
+ DCHECK_EQ(requested_frames_left, 0);
+ }
+
+ input_frames_ -= audio_bus->frames() - requested_frames_left;
+ DCHECK_GE(input_frames_, 0);
+
+ buffered_input_frames_ += audio_bus->frames() - requested_frames_left;
+
+ // Full volume.
+ return 1.0;
+}
+
+void AudioBufferConverter::ResetConverter(
+ const scoped_refptr<AudioBuffer>& buffer) {
+ Flush();
+ audio_converter_.reset();
+ input_params_.Reset(
+ input_params_.format(),
+ buffer->channel_layout(),
+ buffer->channel_count(),
+ 0,
+ buffer->sample_rate(),
+ input_params_.bits_per_sample(),
+ // This is arbitrary, but small buffer sizes result in a lot of tiny
+ // ProvideInput calls, so we'll use at least the SincResampler's default
+ // request size.
+ std::max(buffer->frame_count(),
+ static_cast<int>(SincResampler::kDefaultRequestSize)));
+
+ io_sample_rate_ratio_ = static_cast<double>(input_params_.sample_rate()) /
+ output_params_.sample_rate();
+
+ // If |buffer| matches |output_params_| we don't need an AudioConverter at
+ // all, and can early-out here.
+ if (!IsConfigChange(output_params_, buffer))
+ return;
+
+ audio_converter_.reset(
+ new AudioConverter(input_params_, output_params_, true));
+ audio_converter_->AddInput(this);
+}
+
+void AudioBufferConverter::ConvertIfPossible() {
+ DCHECK(audio_converter_);
+
+ int request_frames = 0;
+
+ if (is_flushing_) {
+ // If we're flushing we want to convert *everything* even if this means
+ // we'll have to pad some silence in ProvideInput().
+ request_frames =
+ ceil((buffered_input_frames_ + input_frames_) / io_sample_rate_ratio_);
+ } else {
+ // How many calls to ProvideInput() we can satisfy completely.
+ int chunks = input_frames_ / input_params_.frames_per_buffer();
+
+ // How many output frames that corresponds to:
+ request_frames = chunks * audio_converter_->ChunkSize();
+ }
+
+ if (!request_frames)
+ return;
+
+ scoped_refptr<AudioBuffer> output_buffer =
+ AudioBuffer::CreateBuffer(kSampleFormatPlanarF32,
+ output_params_.channel_layout(),
+ output_params_.sample_rate(),
+ request_frames);
+ scoped_ptr<AudioBus> output_bus =
+ AudioBus::CreateWrapper(output_buffer->channel_count());
+
+ int frames_remaining = request_frames;
+
+ // The AudioConverter wants requests of a fixed size, so we'll slide an
+ // AudioBus of that size across the |output_buffer|.
+ while (frames_remaining != 0) {
+ int frames_this_iteration =
+ std::min(output_params_.frames_per_buffer(), frames_remaining);
+
+ int offset_into_buffer = output_buffer->frame_count() - frames_remaining;
+
+ // Wrap the portion of the AudioBuffer in an AudioBus so the AudioConverter
+ // can fill it.
+ output_bus->set_frames(frames_this_iteration);
+ for (int ch = 0; ch < output_buffer->channel_count(); ++ch) {
+ output_bus->SetChannelData(
+ ch,
+ reinterpret_cast<float*>(output_buffer->channel_data()[ch]) +
+ offset_into_buffer);
+ }
+
+ // Do the actual conversion.
+ audio_converter_->Convert(output_bus.get());
+ frames_remaining -= frames_this_iteration;
+ buffered_input_frames_ -= frames_this_iteration * io_sample_rate_ratio_;
+ }
+
+ // Compute the timestamp.
+ output_buffer->set_timestamp(timestamp_helper_.GetTimestamp());
+ output_buffer->set_duration(
+ timestamp_helper_.GetFrameDuration(request_frames));
+ timestamp_helper_.AddFrames(request_frames);
+
+ queued_outputs_.push_back(output_buffer);
+}
+
+void AudioBufferConverter::Flush() {
+ if (!audio_converter_)
+ return;
+ is_flushing_ = true;
+ ConvertIfPossible();
+ is_flushing_ = false;
+ audio_converter_->Reset();
+ DCHECK_EQ(input_frames_, 0);
+ DCHECK_EQ(last_input_buffer_offset_, 0);
+ DCHECK_LT(buffered_input_frames_, 1.0);
+ DCHECK(queued_inputs_.empty());
+ buffered_input_frames_ = 0.0;
+}
+
+} // namespace media
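A quick sanity check of the frame accounting above (an illustrative aside, not part of the patch): with 48 kHz input and the 44.1 kHz output used by the unit tests below, io_sample_rate_ratio_ is 48000 / 44100, roughly 1.088, so flushing a single 512-frame input should produce ceil(512 / 1.088) = 471 output frames, with the trailing fraction of a frame padded with silence by ProvideInput(). A minimal sketch of that arithmetic:

  #include <cmath>

  // Hypothetical sanity check, not part of the patch; the numbers mirror the
  // 48 kHz -> 44.1 kHz case exercised by the unit tests below.
  int ExpectedFlushedFrames() {
    const double io_sample_rate_ratio = 48000.0 / 44100.0;          // ~1.088
    const double exact_output_frames = 512 / io_sample_rate_ratio;  // ~470.4
    // Flush() rounds the request up and pads the final partial frame with
    // silence via ProvideInput().
    return static_cast<int>(std::ceil(exact_output_frames));        // 471
  }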
diff --git a/media/base/audio_buffer_converter.h b/media/base/audio_buffer_converter.h
new file mode 100644
index 0000000..9785b10
--- /dev/null
+++ b/media/base/audio_buffer_converter.h
@@ -0,0 +1,101 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_BUFFER_CONVERTER_H_
+#define MEDIA_BASE_AUDIO_BUFFER_CONVERTER_H_
+
+#include <deque>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_converter.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBuffer;
+class AudioBus;
+
+// Takes AudioBuffers in any format and uses an AudioConverter to convert them
+// to a common format (usually the hardware output format).
+class MEDIA_EXPORT AudioBufferConverter : public AudioConverter::InputCallback {
+ public:
+ explicit AudioBufferConverter(const AudioParameters& output_params);
+ virtual ~AudioBufferConverter();
+
+ void AddInput(const scoped_refptr<AudioBuffer>& buffer);
+
+ // Is an output buffer available via GetNextBuffer()?
+ bool HasNextBuffer();
+
+ // This should only be called if HasNextBuffer() returns true.
+ scoped_refptr<AudioBuffer> GetNextBuffer();
+
+ // Reset internal state.
+ void Reset();
+
+ // Reset internal timestamp state. Upon the next AddInput() call, our base
+ // timestamp will be set to match the input buffer.
+ void ResetTimestampState();
+
+ private:
+ // Callback to provide data to the AudioConverter
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) OVERRIDE;
+
+ // Reset the converter in response to a configuration change.
+ void ResetConverter(const scoped_refptr<AudioBuffer>& input_buffer);
+
+ // Perform conversion if we have enough data.
+ void ConvertIfPossible();
+
+ // Flush remaining output
+ void Flush();
+
+ // The output parameters.
+ AudioParameters output_params_;
+
+ // The current input parameters (we cache these to detect configuration
+ // changes, so we know when to reset the AudioConverter).
+ AudioParameters input_params_;
+
+ typedef std::deque<scoped_refptr<AudioBuffer> > BufferQueue;
+
+ // Queued input buffers. Not much data ever accumulates here; as soon as
+ // there's enough to produce an output buffer, we do so.
+ BufferQueue queued_inputs_;
+
+ // Offset into the front element of |queued_inputs_|. A ProvideInput() call
+ // doesn't necessarily consume an entire buffer.
+ int last_input_buffer_offset_;
+
+ // Buffer of output frames, to be returned by GetNextBuffer().
+ BufferQueue queued_outputs_;
+
+ // How many frames of input we have in |queued_inputs_|.
+ int input_frames_;
+
+ // Input frames in the AudioConverter's internal buffers.
+ double buffered_input_frames_;
+
+ // Ratio of sample rates, in/out.
+ double io_sample_rate_ratio_;
+
+ // Computes timestamps in terms of the output sample rate.
+ AudioTimestampHelper timestamp_helper_;
+
+ // Are we flushing everything, without regard for providing the
+ // AudioConverter with full AudioBuses in ProvideInput()?
+ bool is_flushing_;
+
+ // The AudioConverter which does the real work here.
+ scoped_ptr<AudioConverter> audio_converter_;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_BUFFER_CONVERTER_H_
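The unit tests that follow exercise this interface directly. As a rough usage sketch (hypothetical helper, not part of the patch; in practice the output parameters come from AudioHardwareConfig::GetOutputConfig(), as AudioRendererImpl does further down), a caller pushes decoded buffers in and drains whatever output is ready:

  #include <vector>

  #include "base/memory/ref_counted.h"
  #include "media/base/audio_buffer.h"
  #include "media/base/audio_buffer_converter.h"

  namespace media {

  // Hypothetical helper: feed one decoded buffer through |converter| and
  // collect every completed output buffer. Passing an end-of-stream buffer
  // flushes any frames still held inside the internal AudioConverter.
  void ConvertBuffer(AudioBufferConverter* converter,
                     const scoped_refptr<AudioBuffer>& decoded,
                     std::vector<scoped_refptr<AudioBuffer> >* converted) {
    converter->AddInput(decoded);  // Any sample rate / channel layout is fine.
    while (converter->HasNextBuffer())
      converted->push_back(converter->GetNextBuffer());  // Output rate/layout.
  }

  }  // namespace media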
diff --git a/media/base/audio_buffer_converter_unittest.cc b/media/base/audio_buffer_converter_unittest.cc
new file mode 100644
index 0000000..80567cf
--- /dev/null
+++ b/media/base/audio_buffer_converter_unittest.cc
@@ -0,0 +1,186 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_buffer_converter.h"
+#include "media/base/sinc_resampler.h"
+#include "media/base/test_helpers.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+const int kOutSampleRate = 44100;
+const ChannelLayout kOutChannelLayout = CHANNEL_LAYOUT_STEREO;
+
+static scoped_refptr<AudioBuffer> MakeTestBuffer(int sample_rate,
+ ChannelLayout channel_layout,
+ int frames) {
+ return MakeAudioBuffer<uint8>(kSampleFormatU8,
+ channel_layout,
+ sample_rate,
+ 0,
+ 1,
+ frames,
+ base::TimeDelta::FromSeconds(0),
+ base::TimeDelta::FromSeconds(0));
+}
+
+class AudioBufferConverterTest : public ::testing::Test {
+ public:
+ AudioBufferConverterTest()
+ : input_frames_(0), expected_output_frames_(0.0), output_frames_(0) {
+ AudioParameters output_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kOutChannelLayout,
+ kOutSampleRate,
+ 16,
+ 512);
+ audio_buffer_converter_.reset(new AudioBufferConverter(output_params));
+ }
+
+ void Reset() {
+ audio_buffer_converter_->Reset();
+ output_frames_ = expected_output_frames_ = input_frames_ = 0;
+ }
+
+ void AddInput(const scoped_refptr<AudioBuffer>& in) {
+ if (!in->end_of_stream()) {
+ input_frames_ += in->frame_count();
+ expected_output_frames_ +=
+ in->frame_count() *
+ (static_cast<double>(kOutSampleRate) / in->sample_rate());
+ }
+ audio_buffer_converter_->AddInput(in);
+ }
+
+ void ConsumeAllOutput() {
+ AddInput(AudioBuffer::CreateEOSBuffer());
+ while (audio_buffer_converter_->HasNextBuffer()) {
+ scoped_refptr<AudioBuffer> out = audio_buffer_converter_->GetNextBuffer();
+ if (!out->end_of_stream()) {
+ output_frames_ += out->frame_count();
+ EXPECT_EQ(out->sample_rate(), kOutSampleRate);
+ EXPECT_EQ(out->channel_layout(), kOutChannelLayout);
+ } else {
+ EXPECT_FALSE(audio_buffer_converter_->HasNextBuffer());
+ }
+ }
+ EXPECT_EQ(output_frames_, ceil(expected_output_frames_));
+ }
+
+ private:
+ scoped_ptr<AudioBufferConverter> audio_buffer_converter_;
+
+ int input_frames_;
+ double expected_output_frames_;
+ int output_frames_;
+};
+
+TEST_F(AudioBufferConverterTest, PassThrough) {
+ scoped_refptr<AudioBuffer> in =
+ MakeTestBuffer(kOutSampleRate, kOutChannelLayout, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, Downsample) {
+ scoped_refptr<AudioBuffer> in = MakeTestBuffer(48000, kOutChannelLayout, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, Upsample) {
+ scoped_refptr<AudioBuffer> in = MakeTestBuffer(8000, kOutChannelLayout, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+// Test resampling a buffer smaller than the SincResampler's kernel size.
+TEST_F(AudioBufferConverterTest, Resample_TinyBuffer) {
+ AddInput(MakeTestBuffer(
+ 48000, CHANNEL_LAYOUT_STEREO, SincResampler::kKernelSize - 1));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, Resample_DifferingBufferSizes) {
+ const int input_sample_rate = 48000;
+ AddInput(MakeTestBuffer(input_sample_rate, kOutChannelLayout, 100));
+ AddInput(MakeTestBuffer(input_sample_rate, kOutChannelLayout, 200));
+ AddInput(MakeTestBuffer(input_sample_rate, kOutChannelLayout, 300));
+ AddInput(MakeTestBuffer(input_sample_rate, kOutChannelLayout, 400));
+ AddInput(MakeTestBuffer(input_sample_rate, kOutChannelLayout, 500));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ChannelDownmix) {
+ scoped_refptr<AudioBuffer> in =
+ MakeTestBuffer(kOutSampleRate, CHANNEL_LAYOUT_MONO, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ChannelUpmix) {
+ scoped_refptr<AudioBuffer> in =
+ MakeTestBuffer(kOutSampleRate, CHANNEL_LAYOUT_5_1, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ResampleAndRemix) {
+ scoped_refptr<AudioBuffer> in =
+ MakeTestBuffer(48000, CHANNEL_LAYOUT_5_1, 512);
+ AddInput(in);
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ConfigChange_SampleRate) {
+ AddInput(MakeTestBuffer(48000, kOutChannelLayout, 512));
+ AddInput(MakeTestBuffer(44100, kOutChannelLayout, 512));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ConfigChange_ChannelLayout) {
+ AddInput(MakeTestBuffer(kOutSampleRate, CHANNEL_LAYOUT_STEREO, 512));
+ AddInput(MakeTestBuffer(kOutSampleRate, CHANNEL_LAYOUT_MONO, 512));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ConfigChange_SampleRateAndChannelLayout) {
+ AddInput(MakeTestBuffer(44100, CHANNEL_LAYOUT_STEREO, 512));
+ AddInput(MakeTestBuffer(48000, CHANNEL_LAYOUT_MONO, 512));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ConfigChange_Multiple) {
+ AddInput(MakeTestBuffer(44100, CHANNEL_LAYOUT_STEREO, 512));
+ AddInput(MakeTestBuffer(48000, CHANNEL_LAYOUT_MONO, 512));
+ AddInput(MakeTestBuffer(44100, CHANNEL_LAYOUT_5_1, 512));
+ AddInput(MakeTestBuffer(22050, CHANNEL_LAYOUT_STEREO, 512));
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, Reset) {
+ AddInput(MakeTestBuffer(44100, CHANNEL_LAYOUT_STEREO, 512));
+ Reset();
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ResampleThenReset) {
+ // Resampling is likely to leave some data buffered in AudioConverter's
+ // fifo or resampler, so make sure Reset() cleans that all up.
+ AddInput(MakeTestBuffer(48000, CHANNEL_LAYOUT_STEREO, 512));
+ Reset();
+ ConsumeAllOutput();
+}
+
+TEST_F(AudioBufferConverterTest, ResetThenConvert) {
+ AddInput(MakeTestBuffer(kOutSampleRate, kOutChannelLayout, 512));
+ Reset();
+ // Make sure we can keep using the AudioBufferConverter after we've Reset().
+ AddInput(MakeTestBuffer(kOutSampleRate, kOutChannelLayout, 512));
+ ConsumeAllOutput();
+}
+
+} // namespace media
diff --git a/media/base/audio_converter.cc b/media/base/audio_converter.cc
index 195a227..aa0be4f 100644
--- a/media/base/audio_converter.cc
+++ b/media/base/audio_converter.cc
@@ -25,7 +25,7 @@ namespace media {
AudioConverter::AudioConverter(const AudioParameters& input_params,
const AudioParameters& output_params,
bool disable_fifo)
- : chunk_size_(output_params.frames_per_buffer()),
+ : chunk_size_(input_params.frames_per_buffer()),
downmix_early_(false),
resampler_frame_delay_(0),
input_channel_count_(input_params.channels()) {
@@ -48,15 +48,16 @@ AudioConverter::AudioConverter(const AudioParameters& input_params,
if (input_params.sample_rate() != output_params.sample_rate()) {
DVLOG(1) << "Resampling from " << input_params.sample_rate() << " to "
<< output_params.sample_rate();
- const double io_sample_rate_ratio = input_params.sample_rate() /
- static_cast<double>(output_params.sample_rate());
const int request_size = disable_fifo ? SincResampler::kDefaultRequestSize :
input_params.frames_per_buffer();
+ const double io_sample_rate_ratio =
+ input_params.sample_rate() /
+ static_cast<double>(output_params.sample_rate());
resampler_.reset(new MultiChannelResampler(
- downmix_early_ ? output_params.channels() :
- input_params.channels(),
- io_sample_rate_ratio, request_size, base::Bind(
- &AudioConverter::ProvideInput, base::Unretained(this))));
+ downmix_early_ ? output_params.channels() : input_params.channels(),
+ io_sample_rate_ratio,
+ request_size,
+ base::Bind(&AudioConverter::ProvideInput, base::Unretained(this))));
}
input_frame_duration_ = base::TimeDelta::FromMicroseconds(
diff --git a/media/base/audio_decoder.h b/media/base/audio_decoder.h
index 901126d..b1ee2cd 100644
--- a/media/base/audio_decoder.h
+++ b/media/base/audio_decoder.h
@@ -69,11 +69,6 @@ class MEDIA_EXPORT AudioDecoder {
// complete before deleting the decoder.
virtual void Stop(const base::Closure& closure) = 0;
- // Returns various information about the decoded audio format.
- virtual int bits_per_channel() = 0;
- virtual ChannelLayout channel_layout() = 0;
- virtual int samples_per_second() = 0;
-
private:
DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
};
diff --git a/media/base/mock_filters.h b/media/base/mock_filters.h
index 339c370..0392d11 100644
--- a/media/base/mock_filters.h
+++ b/media/base/mock_filters.h
@@ -97,9 +97,6 @@ class MockAudioDecoder : public AudioDecoder {
MOCK_METHOD2(Decode,
void(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB&));
- MOCK_METHOD0(bits_per_channel, int(void));
- MOCK_METHOD0(channel_layout, ChannelLayout(void));
- MOCK_METHOD0(samples_per_second, int(void));
MOCK_METHOD1(Reset, void(const base::Closure&));
MOCK_METHOD1(Stop, void(const base::Closure&));
diff --git a/media/filters/audio_renderer_impl.cc b/media/filters/audio_renderer_impl.cc
index 509ae95..acb43f7 100644
--- a/media/filters/audio_renderer_impl.cc
+++ b/media/filters/audio_renderer_impl.cc
@@ -15,6 +15,8 @@
#include "base/metrics/histogram.h"
#include "base/single_thread_task_runner.h"
#include "media/base/audio_buffer.h"
+#include "media/base/audio_buffer_converter.h"
+#include "media/base/audio_hardware_config.h"
#include "media/base/audio_splicer.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/demuxer_stream.h"
@@ -41,12 +43,14 @@ AudioRendererImpl::AudioRendererImpl(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
media::AudioRendererSink* sink,
ScopedVector<AudioDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb)
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ AudioHardwareConfig* hardware_config)
: task_runner_(task_runner),
sink_(sink),
audio_buffer_stream_(task_runner,
decoders.Pass(),
set_decryptor_ready_cb),
+ hardware_config_(hardware_config),
now_cb_(base::Bind(&base::TimeTicks::Now)),
state_(kUninitialized),
sink_playing_(false),
@@ -60,6 +64,8 @@ AudioRendererImpl::AudioRendererImpl(
weak_factory_(this) {
audio_buffer_stream_.set_splice_observer(base::Bind(
&AudioRendererImpl::OnNewSpliceBuffer, weak_factory_.GetWeakPtr()));
+ audio_buffer_stream_.set_config_change_observer(base::Bind(
+ &AudioRendererImpl::OnConfigChange, weak_factory_.GetWeakPtr()));
}
AudioRendererImpl::~AudioRendererImpl() {
@@ -171,6 +177,8 @@ void AudioRendererImpl::ResetDecoderDone() {
earliest_end_time_ = now_cb_.Run();
splicer_->Reset();
+ if (buffer_converter_)
+ buffer_converter_->Reset();
algorithm_->FlushBuffers();
}
base::ResetAndReturn(&flush_cb_).Run();
@@ -253,6 +261,26 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
disabled_cb_ = disabled_cb;
error_cb_ = error_cb;
+ expecting_config_changes_ = stream->SupportsConfigChanges();
+ if (!expecting_config_changes_) {
+ // The actual buffer size is controlled via the size of the AudioBus
+ // provided to Render(), so just choose something reasonable here for looks.
+ int buffer_size = stream->audio_decoder_config().samples_per_second() / 100;
+ audio_parameters_.Reset(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ stream->audio_decoder_config().channel_layout(),
+ ChannelLayoutToChannelCount(
+ stream->audio_decoder_config().channel_layout()),
+ 0,
+ stream->audio_decoder_config().samples_per_second(),
+ stream->audio_decoder_config().bits_per_channel(),
+ buffer_size);
+ buffer_converter_.reset();
+ } else {
+ // TODO(rileya): Support hardware config changes
+ audio_parameters_ = hardware_config_->GetOutputConfig();
+ }
+
audio_buffer_stream_.Initialize(
stream,
statistics_cb,
@@ -276,27 +304,15 @@ void AudioRendererImpl::OnAudioBufferStreamInitialized(bool success) {
return;
}
- int sample_rate = audio_buffer_stream_.decoder()->samples_per_second();
-
- // The actual buffer size is controlled via the size of the AudioBus
- // provided to Render(), so just choose something reasonable here for looks.
- int buffer_size = audio_buffer_stream_.decoder()->samples_per_second() / 100;
-
- // TODO(rileya): Remove the channel_layout/bits_per_channel/samples_per_second
- // getters from AudioDecoder, and adjust this accordingly.
- audio_parameters_ =
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- audio_buffer_stream_.decoder()->channel_layout(),
- sample_rate,
- audio_buffer_stream_.decoder()->bits_per_channel(),
- buffer_size);
if (!audio_parameters_.IsValid()) {
ChangeState_Locked(kUninitialized);
base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_INITIALIZATION_FAILED);
return;
}
- splicer_.reset(new AudioSplicer(sample_rate));
+ if (expecting_config_changes_)
+ buffer_converter_.reset(new AudioBufferConverter(audio_parameters_));
+ splicer_.reset(new AudioSplicer(audio_parameters_.sample_rate()));
// We're all good! Continue initializing the rest of the audio renderer
// based on the decoder format.
@@ -376,9 +392,20 @@ void AudioRendererImpl::DecodedAudioReady(
return;
}
- if (!splicer_->AddInput(buffer)) {
- HandleAbortedReadOrDecodeError(true);
- return;
+ if (expecting_config_changes_) {
+ DCHECK(buffer_converter_);
+ buffer_converter_->AddInput(buffer);
+ while (buffer_converter_->HasNextBuffer()) {
+ if (!splicer_->AddInput(buffer_converter_->GetNextBuffer())) {
+ HandleAbortedReadOrDecodeError(true);
+ return;
+ }
+ }
+ } else {
+ if (!splicer_->AddInput(buffer)) {
+ HandleAbortedReadOrDecodeError(true);
+ return;
+ }
}
if (!splicer_->HasNextBuffer()) {
@@ -719,4 +746,10 @@ void AudioRendererImpl::OnNewSpliceBuffer(base::TimeDelta splice_timestamp) {
splicer_->SetSpliceTimestamp(splice_timestamp);
}
+void AudioRendererImpl::OnConfigChange() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(expecting_config_changes_);
+ buffer_converter_->ResetTimestampState();
+}
+
} // namespace media
diff --git a/media/filters/audio_renderer_impl.h b/media/filters/audio_renderer_impl.h
index d5bc21d..f6f41f1 100644
--- a/media/filters/audio_renderer_impl.h
+++ b/media/filters/audio_renderer_impl.h
@@ -38,8 +38,10 @@ class SingleThreadTaskRunner;
namespace media {
class AudioBus;
+class AudioBufferConverter;
class AudioSplicer;
class DecryptingDemuxerStream;
+class AudioHardwareConfig;
class MEDIA_EXPORT AudioRendererImpl
: public AudioRenderer,
@@ -57,7 +59,8 @@ class MEDIA_EXPORT AudioRendererImpl
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
AudioRendererSink* sink,
ScopedVector<AudioDecoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb);
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ AudioHardwareConfig* hardware_params);
virtual ~AudioRendererImpl();
// AudioRenderer implementation.
@@ -182,9 +185,16 @@ class MEDIA_EXPORT AudioRendererImpl
// Called by the AudioBufferStream when a splice buffer is demuxed.
void OnNewSpliceBuffer(base::TimeDelta);
+ // Called by the AudioBufferStream when a config change occurs.
+ void OnConfigChange();
+
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
scoped_ptr<AudioSplicer> splicer_;
+ scoped_ptr<AudioBufferConverter> buffer_converter_;
+
+ // Whether or not we expect to handle config changes.
+ bool expecting_config_changes_;
// The sink (destination) for rendered audio. |sink_| must only be accessed
// on |task_runner_|. |sink_| must never be called under |lock_| or else we
@@ -193,7 +203,10 @@ class MEDIA_EXPORT AudioRendererImpl
AudioBufferStream audio_buffer_stream_;
- // AudioParameters constructed during Initialize().
+ // Interface to the hardware audio params.
+ const AudioHardwareConfig* const hardware_config_;
+
+ // Cached copy of hardware params from |hardware_config_|.
AudioParameters audio_parameters_;
// Callbacks provided during Initialize().
diff --git a/media/filters/audio_renderer_impl_unittest.cc b/media/filters/audio_renderer_impl_unittest.cc
index 965e5cf..c4fb6ac 100644
--- a/media/filters/audio_renderer_impl_unittest.cc
+++ b/media/filters/audio_renderer_impl_unittest.cc
@@ -11,6 +11,7 @@
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "media/base/audio_buffer.h"
+#include "media/base/audio_hardware_config.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/fake_audio_renderer_sink.h"
#include "media/base/gmock_callback_support.h"
@@ -51,7 +52,8 @@ class AudioRendererImplTest : public ::testing::Test {
public:
// Give the decoder some non-garbage media properties.
AudioRendererImplTest()
- : needs_stop_(true),
+ : hardware_config_(AudioParameters(), AudioParameters()),
+ needs_stop_(true),
demuxer_stream_(DemuxerStream::AUDIO),
decoder_(new MockAudioDecoder()) {
AudioDecoderConfig audio_config(kCodec,
@@ -73,26 +75,24 @@ class AudioRendererImplTest : public ::testing::Test {
EXPECT_CALL(*decoder_, Stop(_))
.WillRepeatedly(Invoke(this, &AudioRendererImplTest::StopDecoder));
- // Set up audio properties.
- EXPECT_CALL(*decoder_, bits_per_channel())
- .WillRepeatedly(Return(audio_config.bits_per_channel()));
- EXPECT_CALL(*decoder_, channel_layout())
- .WillRepeatedly(Return(audio_config.channel_layout()));
- EXPECT_CALL(*decoder_, samples_per_second())
- .WillRepeatedly(Return(audio_config.samples_per_second()));
-
// Mock out demuxer reads
EXPECT_CALL(demuxer_stream_, Read(_)).WillRepeatedly(
RunCallback<0>(DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer()));
-
+ AudioParameters out_params =
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kChannelLayout,
+ kSamplesPerSecond,
+ SampleFormatToBytesPerChannel(kSampleFormat) * 8,
+ 512);
+ hardware_config_.UpdateOutputConfig(out_params);
ScopedVector<AudioDecoder> decoders;
decoders.push_back(decoder_);
sink_ = new FakeAudioRendererSink();
- renderer_.reset(new AudioRendererImpl(
- message_loop_.message_loop_proxy(),
- sink_,
- decoders.Pass(),
- SetDecryptorReadyCB()));
+ renderer_.reset(new AudioRendererImpl(message_loop_.message_loop_proxy(),
+ sink_,
+ decoders.Pass(),
+ SetDecryptorReadyCB(),
+ &hardware_config_));
// Stub out time.
renderer_->set_now_cb_for_testing(base::Bind(
@@ -113,17 +113,6 @@ class AudioRendererImplTest : public ::testing::Test {
.WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
}
- void ExpectUnsupportedAudioDecoderConfig() {
- EXPECT_CALL(*decoder_, bits_per_channel())
- .WillRepeatedly(Return(3));
- EXPECT_CALL(*decoder_, channel_layout())
- .WillRepeatedly(Return(CHANNEL_LAYOUT_UNSUPPORTED));
- EXPECT_CALL(*decoder_, samples_per_second())
- .WillRepeatedly(Return(0));
- EXPECT_CALL(*decoder_, Initialize(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- }
-
MOCK_METHOD1(OnStatistics, void(const PipelineStatistics&));
MOCK_METHOD0(OnUnderflow, void());
MOCK_METHOD0(OnDisabled, void());
@@ -138,8 +127,8 @@ class AudioRendererImplTest : public ::testing::Test {
.WillOnce(RunCallback<1>(PIPELINE_OK));
InitializeWithStatus(PIPELINE_OK);
- next_timestamp_.reset(
- new AudioTimestampHelper(decoder_->samples_per_second()));
+ next_timestamp_.reset(new AudioTimestampHelper(
+ hardware_config_.GetOutputConfig().sample_rate()));
}
void InitializeWithStatus(PipelineStatus expected) {
@@ -368,7 +357,7 @@ class AudioRendererImplTest : public ::testing::Test {
do {
TimeDelta audio_delay = TimeDelta::FromMicroseconds(
total_frames_read * Time::kMicrosecondsPerSecond /
- static_cast<float>(decoder_->samples_per_second()));
+ static_cast<float>(hardware_config_.GetOutputConfig().sample_rate()));
frames_read = renderer_->Render(
bus.get(), audio_delay.InMilliseconds());
@@ -460,6 +449,7 @@ class AudioRendererImplTest : public ::testing::Test {
base::MessageLoop message_loop_;
scoped_ptr<AudioRendererImpl> renderer_;
scoped_refptr<FakeAudioRendererSink> sink_;
+ AudioHardwareConfig hardware_config_;
// Whether or not the test needs the destructor to call Stop() on
// |renderer_| at destruction.
@@ -535,11 +525,6 @@ class AudioRendererImplTest : public ::testing::Test {
DISALLOW_COPY_AND_ASSIGN(AudioRendererImplTest);
};
-TEST_F(AudioRendererImplTest, Initialize_Failed) {
- ExpectUnsupportedAudioDecoderConfig();
- InitializeWithStatus(PIPELINE_ERROR_INITIALIZATION_FAILED);
-}
-
TEST_F(AudioRendererImplTest, Initialize_Successful) {
Initialize();
}
diff --git a/media/filters/decoder_stream.cc b/media/filters/decoder_stream.cc
index 0390abe..2863d22 100644
--- a/media/filters/decoder_stream.cc
+++ b/media/filters/decoder_stream.cc
@@ -376,6 +376,10 @@ void DecoderStream<StreamType>::OnBufferReady(
if (status == DemuxerStream::kConfigChanged) {
FUNCTION_DVLOG(2) << ": " << "ConfigChanged";
DCHECK(stream_->SupportsConfigChanges());
+
+ if (!config_change_observer_cb_.is_null())
+ config_change_observer_cb_.Run();
+
state_ = STATE_FLUSHING_DECODER;
if (!reset_cb_.is_null()) {
AbortRead();
diff --git a/media/filters/decoder_stream.h b/media/filters/decoder_stream.h
index 3a24792..cc44885 100644
--- a/media/filters/decoder_stream.h
+++ b/media/filters/decoder_stream.h
@@ -98,6 +98,15 @@ class MEDIA_EXPORT DecoderStream {
splice_observer_cb_ = splice_observer;
}
+ // Allows callers to register for notification of config changes; this is
+ // called immediately after receiving the 'kConfigChanged' status from the
+ // DemuxerStream, before any action is taken to handle the config change.
+ typedef base::Closure ConfigChangeObserverCB;
+ void set_config_change_observer(
+ const ConfigChangeObserverCB& config_change_observer) {
+ config_change_observer_cb_ = config_change_observer;
+ }
+
private:
enum State {
STATE_UNINITIALIZED,
@@ -174,6 +183,7 @@ class MEDIA_EXPORT DecoderStream {
scoped_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream_;
SpliceObserverCB splice_observer_cb_;
+ ConfigChangeObserverCB config_change_observer_cb_;
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<DecoderStream<StreamType> > weak_factory_;
diff --git a/media/filters/decrypting_audio_decoder.cc b/media/filters/decrypting_audio_decoder.cc
index 279e19b..7336f21 100644
--- a/media/filters/decrypting_audio_decoder.cc
+++ b/media/filters/decrypting_audio_decoder.cc
@@ -42,9 +42,6 @@ DecryptingAudioDecoder::DecryptingAudioDecoder(
set_decryptor_ready_cb_(set_decryptor_ready_cb),
decryptor_(NULL),
key_added_while_decode_pending_(false),
- bits_per_channel_(0),
- channel_layout_(CHANNEL_LAYOUT_NONE),
- samples_per_second_(0),
weak_factory_(this) {}
void DecryptingAudioDecoder::Initialize(const AudioDecoderConfig& config,
@@ -185,21 +182,6 @@ void DecryptingAudioDecoder::Stop(const base::Closure& closure) {
task_runner_->PostTask(FROM_HERE, closure);
}
-int DecryptingAudioDecoder::bits_per_channel() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- return bits_per_channel_;
-}
-
-ChannelLayout DecryptingAudioDecoder::channel_layout() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- return channel_layout_;
-}
-
-int DecryptingAudioDecoder::samples_per_second() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- return samples_per_second_;
-}
-
DecryptingAudioDecoder::~DecryptingAudioDecoder() {
DCHECK(state_ == kUninitialized || state_ == kStopped) << state_;
}
@@ -260,7 +242,8 @@ void DecryptingAudioDecoder::FinishInitialization(bool success) {
}
// Success!
- UpdateDecoderConfig();
+ timestamp_helper_.reset(
+ new AudioTimestampHelper(config_.samples_per_second()));
decryptor_->RegisterNewKeyCB(
Decryptor::kAudio,
@@ -380,13 +363,6 @@ void DecryptingAudioDecoder::DoReset() {
base::ResetAndReturn(&reset_cb_).Run();
}
-void DecryptingAudioDecoder::UpdateDecoderConfig() {
- bits_per_channel_ = kSupportedBitsPerChannel;
- channel_layout_ = config_.channel_layout();
- samples_per_second_ = config_.samples_per_second();
- timestamp_helper_.reset(new AudioTimestampHelper(samples_per_second_));
-}
-
void DecryptingAudioDecoder::EnqueueFrames(
const Decryptor::AudioBuffers& frames) {
queued_audio_frames_ = frames;
diff --git a/media/filters/decrypting_audio_decoder.h b/media/filters/decrypting_audio_decoder.h
index 8184d42..562faa0 100644
--- a/media/filters/decrypting_audio_decoder.h
+++ b/media/filters/decrypting_audio_decoder.h
@@ -50,9 +50,6 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
virtual scoped_refptr<AudioBuffer> GetDecodeOutput() OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
virtual void Stop(const base::Closure& closure) OVERRIDE;
- virtual int bits_per_channel() OVERRIDE;
- virtual ChannelLayout channel_layout() OVERRIDE;
- virtual int samples_per_second() OVERRIDE;
private:
// For a detailed state diagram please see this link: http://goo.gl/8jAok
@@ -93,10 +90,6 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
// Resets decoder and calls |reset_cb_|.
void DoReset();
- // Updates audio configs from |demuxer_stream_| and resets
- // |output_timestamp_base_| and |total_samples_decoded_|.
- void UpdateDecoderConfig();
-
// Sets timestamp and duration for |queued_audio_frames_| to make sure the
// renderer always receives continuous frames without gaps and overlaps.
void EnqueueFrames(const Decryptor::AudioBuffers& frames);
@@ -130,11 +123,6 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
Decryptor::AudioBuffers queued_audio_frames_;
- // Decoded audio format.
- int bits_per_channel_;
- ChannelLayout channel_layout_;
- int samples_per_second_;
-
scoped_ptr<AudioTimestampHelper> timestamp_helper_;
// NOTE: Weak pointers must be invalidated before all other member variables.
diff --git a/media/filters/decrypting_audio_decoder_unittest.cc b/media/filters/decrypting_audio_decoder_unittest.cc
index a6a8152..777db6f 100644
--- a/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/media/filters/decrypting_audio_decoder_unittest.cc
@@ -118,11 +118,6 @@ class DecryptingAudioDecoderTest : public testing::Test {
CHANNEL_LAYOUT_STEREO, kSampleRate, NULL, 0, true, true,
base::TimeDelta(), base::TimeDelta());
InitializeAndExpectStatus(config_, PIPELINE_OK);
-
- EXPECT_EQ(DecryptingAudioDecoder::kSupportedBitsPerChannel,
- decoder_->bits_per_channel());
- EXPECT_EQ(config_.channel_layout(), decoder_->channel_layout());
- EXPECT_EQ(config_.samples_per_second(), decoder_->samples_per_second());
}
void Reinitialize() {
@@ -405,10 +400,6 @@ TEST_F(DecryptingAudioDecoderTest, Reinitialize_ConfigChange) {
ReinitializeConfigChange(new_config);
message_loop_.RunUntilIdle();
-
- EXPECT_EQ(new_config.bits_per_channel(), decoder_->bits_per_channel());
- EXPECT_EQ(new_config.channel_layout(), decoder_->channel_layout());
- EXPECT_EQ(new_config.samples_per_second(), decoder_->samples_per_second());
}
// Test the case where a key is added when the decryptor is in
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index e667330..c8f8f50 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -128,10 +128,6 @@ FFmpegAudioDecoder::FFmpegAudioDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
: task_runner_(task_runner),
state_(kUninitialized),
- bytes_per_channel_(0),
- channel_layout_(CHANNEL_LAYOUT_NONE),
- channels_(0),
- samples_per_second_(0),
av_sample_format_(0),
last_input_timestamp_(kNoTimestamp()),
output_frames_to_drop_(0) {}
@@ -197,21 +193,6 @@ scoped_refptr<AudioBuffer> FFmpegAudioDecoder::GetDecodeOutput() {
return out;
}
-int FFmpegAudioDecoder::bits_per_channel() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- return bytes_per_channel_ * 8;
-}
-
-ChannelLayout FFmpegAudioDecoder::channel_layout() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- return channel_layout_;
-}
-
-int FFmpegAudioDecoder::samples_per_second() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- return samples_per_second_;
-}
-
void FFmpegAudioDecoder::Reset(const base::Closure& closure) {
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -286,8 +267,8 @@ void FFmpegAudioDecoder::DecodeBuffer(
buffer->timestamp() < base::TimeDelta()) {
// Dropping frames for negative timestamps as outlined in section A.2
// in the Vorbis spec. http://xiph.org/vorbis/doc/Vorbis_I_spec.html
- output_frames_to_drop_ = floor(
- 0.5 + -buffer->timestamp().InSecondsF() * samples_per_second_);
+ output_frames_to_drop_ = floor(0.5 + -buffer->timestamp().InSecondsF() *
+ config_.samples_per_second());
} else {
if (last_input_timestamp_ != kNoTimestamp() &&
buffer->timestamp() < last_input_timestamp_) {
@@ -392,17 +373,14 @@ bool FFmpegAudioDecoder::FFmpegDecode(
int original_frames = 0;
int channels = DetermineChannels(av_frame_.get());
if (frame_decoded) {
-
- // TODO(rileya) Remove this check once we properly support midstream audio
- // config changes.
if (av_frame_->sample_rate != config_.samples_per_second() ||
- channels != channels_ ||
+ channels != ChannelLayoutToChannelCount(config_.channel_layout()) ||
av_frame_->format != av_sample_format_) {
DLOG(ERROR) << "Unsupported midstream configuration change!"
<< " Sample Rate: " << av_frame_->sample_rate << " vs "
- << samples_per_second_
+ << config_.samples_per_second()
<< ", Channels: " << channels << " vs "
- << channels_
+ << ChannelLayoutToChannelCount(config_.channel_layout())
<< ", Sample Format: " << av_frame_->format << " vs "
<< av_sample_format_;
@@ -417,7 +395,8 @@ bool FFmpegAudioDecoder::FFmpegDecode(
output = reinterpret_cast<AudioBuffer*>(
av_buffer_get_opaque(av_frame_->buf[0]));
- DCHECK_EQ(channels_, output->channel_count());
+ DCHECK_EQ(ChannelLayoutToChannelCount(config_.channel_layout()),
+ output->channel_count());
original_frames = av_frame_->nb_samples;
int unread_frames = output->frame_count() - original_frames;
DCHECK_GE(unread_frames, 0);
@@ -480,21 +459,6 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
return false;
}
- // TODO(rileya) Remove this check once we properly support midstream audio
- // config changes.
- if (codec_context_.get() &&
- (channel_layout_ != config_.channel_layout() ||
- samples_per_second_ != config_.samples_per_second())) {
- DVLOG(1) << "Unsupported config change :";
- DVLOG(1) << "\tbytes_per_channel : " << bytes_per_channel_
- << " -> " << config_.bytes_per_channel();
- DVLOG(1) << "\tchannel_layout : " << channel_layout_
- << " -> " << config_.channel_layout();
- DVLOG(1) << "\tsample_rate : " << samples_per_second_
- << " -> " << config_.samples_per_second();
- return false;
- }
-
// Release existing decoder resources if necessary.
ReleaseFFmpegResources();
@@ -517,27 +481,21 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
// Success!
av_frame_.reset(av_frame_alloc());
- channel_layout_ = config_.channel_layout();
- samples_per_second_ = config_.samples_per_second();
output_timestamp_helper_.reset(
new AudioTimestampHelper(config_.samples_per_second()));
- // Store initial values to guard against midstream configuration changes.
- channels_ = codec_context_->channels;
- if (channels_ != ChannelLayoutToChannelCount(channel_layout_)) {
+ av_sample_format_ = codec_context_->sample_fmt;
+
+ if (codec_context_->channels !=
+ ChannelLayoutToChannelCount(config_.channel_layout())) {
DLOG(ERROR) << "Audio configuration specified "
- << ChannelLayoutToChannelCount(channel_layout_)
+ << ChannelLayoutToChannelCount(config_.channel_layout())
<< " channels, but FFmpeg thinks the file contains "
- << channels_ << " channels";
+ << codec_context_->channels << " channels";
ReleaseFFmpegResources();
state_ = kUninitialized;
return false;
}
- av_sample_format_ = codec_context_->sample_fmt;
- sample_format_ = AVSampleFormatToSampleFormat(
- static_cast<AVSampleFormat>(av_sample_format_));
- bytes_per_channel_ = SampleFormatToBytesPerChannel(sample_format_);
-
return true;
}
diff --git a/media/filters/ffmpeg_audio_decoder.h b/media/filters/ffmpeg_audio_decoder.h
index ca965d7..32fddb8 100644
--- a/media/filters/ffmpeg_audio_decoder.h
+++ b/media/filters/ffmpeg_audio_decoder.h
@@ -39,9 +39,6 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual scoped_refptr<AudioBuffer> GetDecodeOutput() OVERRIDE;
- virtual int bits_per_channel() OVERRIDE;
- virtual ChannelLayout channel_layout() OVERRIDE;
- virtual int samples_per_second() OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
virtual void Stop(const base::Closure& closure) OVERRIDE;
@@ -79,17 +76,10 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
scoped_ptr<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;
scoped_ptr<AVFrame, ScopedPtrAVFreeFrame> av_frame_;
- // Decoded audio format.
- int bytes_per_channel_;
- ChannelLayout channel_layout_;
- int channels_;
- int samples_per_second_;
+ AudioDecoderConfig config_;
// AVSampleFormat initially requested; not Chrome's SampleFormat.
int av_sample_format_;
- SampleFormat sample_format_;
-
- AudioDecoderConfig config_;
// Used for computing output timestamps.
scoped_ptr<AudioTimestampHelper> output_timestamp_helper_;
diff --git a/media/filters/ffmpeg_audio_decoder_unittest.cc b/media/filters/ffmpeg_audio_decoder_unittest.cc
index 6f571e7..ee09669 100644
--- a/media/filters/ffmpeg_audio_decoder_unittest.cc
+++ b/media/filters/ffmpeg_audio_decoder_unittest.cc
@@ -174,9 +174,6 @@ TEST_F(FFmpegAudioDecoderTest, Initialize) {
vorbis_extradata_->data(),
vorbis_extradata_->data_size(),
false); // Not encrypted.
- EXPECT_EQ(config.bits_per_channel(), decoder_->bits_per_channel());
- EXPECT_EQ(config.channel_layout(), decoder_->channel_layout());
- EXPECT_EQ(config.samples_per_second(), decoder_->samples_per_second());
Stop();
}
diff --git a/media/filters/opus_audio_decoder.cc b/media/filters/opus_audio_decoder.cc
index 1d132c5..161d0c8 100644
--- a/media/filters/opus_audio_decoder.cc
+++ b/media/filters/opus_audio_decoder.cc
@@ -251,10 +251,6 @@ OpusAudioDecoder::OpusAudioDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
: task_runner_(task_runner),
opus_decoder_(NULL),
- channel_layout_(CHANNEL_LAYOUT_NONE),
- samples_per_second_(0),
- sample_format_(kSampleFormatF32),
- bits_per_channel_(SampleFormatToBytesPerChannel(sample_format_) * 8),
last_input_timestamp_(kNoTimestamp()),
frames_to_discard_(0),
frame_delay_at_start_(0),
@@ -283,21 +279,6 @@ void OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
DecodeBuffer(buffer, BindToCurrentLoop(decode_cb));
}
-int OpusAudioDecoder::bits_per_channel() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- return bits_per_channel_;
-}
-
-ChannelLayout OpusAudioDecoder::channel_layout() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- return channel_layout_;
-}
-
-int OpusAudioDecoder::samples_per_second() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- return samples_per_second_;
-}
-
void OpusAudioDecoder::Reset(const base::Closure& closure) {
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -401,19 +382,6 @@ bool OpusAudioDecoder::ConfigureDecoder() {
return false;
}
- // TODO(rileya) Remove this check once we properly support midstream audio
- // config changes.
- if (opus_decoder_ &&
- (channel_layout_ != config_.channel_layout() ||
- samples_per_second_ != config_.samples_per_second())) {
- DLOG(ERROR) << "Unsupported config change -"
- << ", channel_layout: " << channel_layout_
- << " -> " << config_.channel_layout()
- << ", sample_rate: " << samples_per_second_
- << " -> " << config_.samples_per_second();
- return false;
- }
-
// Clean up existing decoder if necessary.
CloseDecoder();
@@ -473,8 +441,6 @@ bool OpusAudioDecoder::ConfigureDecoder() {
return false;
}
- channel_layout_ = config_.channel_layout();
- samples_per_second_ = config_.samples_per_second();
output_timestamp_helper_.reset(
new AudioTimestampHelper(config_.samples_per_second()));
start_input_timestamp_ = kNoTimestamp();
@@ -491,22 +457,21 @@ void OpusAudioDecoder::CloseDecoder() {
void OpusAudioDecoder::ResetTimestampState() {
output_timestamp_helper_->SetBaseTimestamp(kNoTimestamp());
last_input_timestamp_ = kNoTimestamp();
- frames_to_discard_ =
- TimeDeltaToAudioFrames(config_.seek_preroll(), samples_per_second_);
+ frames_to_discard_ = TimeDeltaToAudioFrames(config_.seek_preroll(),
+ config_.samples_per_second());
}
bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
scoped_refptr<AudioBuffer>* output_buffer) {
// Allocate a buffer for the output samples.
- *output_buffer = AudioBuffer::CreateBuffer(
- sample_format_,
- channel_layout_,
- samples_per_second_,
- kMaxOpusOutputPacketSizeSamples);
+ *output_buffer = AudioBuffer::CreateBuffer(config_.sample_format(),
+ config_.channel_layout(),
+ config_.samples_per_second(),
+ kMaxOpusOutputPacketSizeSamples);
const int buffer_size =
output_buffer->get()->channel_count() *
output_buffer->get()->frame_count() *
- SampleFormatToBytesPerChannel(sample_format_);
+ SampleFormatToBytesPerChannel(config_.sample_format());
float* float_output_buffer = reinterpret_cast<float*>(
output_buffer->get()->channel_data()[0]);
@@ -548,8 +513,8 @@ bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
frames_to_discard_ = 0;
}
if (input->discard_padding().InMicroseconds() > 0) {
- int discard_padding = TimeDeltaToAudioFrames(input->discard_padding(),
- samples_per_second_);
+ int discard_padding = TimeDeltaToAudioFrames(
+ input->discard_padding(), config_.samples_per_second());
if (discard_padding < 0 || discard_padding > frames_to_output) {
DVLOG(1) << "Invalid file. Incorrect discard padding value.";
return false;
diff --git a/media/filters/opus_audio_decoder.h b/media/filters/opus_audio_decoder.h
index aa0331f..5855719 100644
--- a/media/filters/opus_audio_decoder.h
+++ b/media/filters/opus_audio_decoder.h
@@ -35,9 +35,6 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
const PipelineStatusCB& status_cb) OVERRIDE;
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
- virtual int bits_per_channel() OVERRIDE;
- virtual ChannelLayout channel_layout() OVERRIDE;
- virtual int samples_per_second() OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
virtual void Stop(const base::Closure& closure) OVERRIDE;
@@ -61,12 +58,6 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
AudioDecoderConfig config_;
OpusMSDecoder* opus_decoder_;
- // Decoded audio format.
- ChannelLayout channel_layout_;
- int samples_per_second_;
- const SampleFormat sample_format_;
- const int bits_per_channel_;
-
// Used for computing output timestamps.
scoped_ptr<AudioTimestampHelper> output_timestamp_helper_;
base::TimeDelta last_input_timestamp_;
diff --git a/media/filters/pipeline_integration_test_base.cc b/media/filters/pipeline_integration_test_base.cc
index a11ce50..bf639cc 100644
--- a/media/filters/pipeline_integration_test_base.cc
+++ b/media/filters/pipeline_integration_test_base.cc
@@ -29,11 +29,12 @@ const char kNullAudioHash[] = "0.00,0.00,0.00,0.00,0.00,0.00,";
PipelineIntegrationTestBase::PipelineIntegrationTestBase()
: hashing_enabled_(false),
clockless_playback_(false),
- pipeline_(new Pipeline(message_loop_.message_loop_proxy(),
- new MediaLog())),
+ pipeline_(
+ new Pipeline(message_loop_.message_loop_proxy(), new MediaLog())),
ended_(false),
pipeline_status_(PIPELINE_OK),
- last_video_frame_format_(VideoFrame::UNKNOWN) {
+ last_video_frame_format_(VideoFrame::UNKNOWN),
+ hardware_config_(AudioParameters(), AudioParameters()) {
base::MD5Init(&md5_context_);
EXPECT_CALL(*this, OnSetOpaque(true)).Times(AnyNumber());
}
@@ -266,6 +267,13 @@ PipelineIntegrationTestBase::CreateFilterCollection(
audio_decoders.push_back(
new OpusAudioDecoder(message_loop_.message_loop_proxy()));
+ AudioParameters out_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO,
+ 44100,
+ 16,
+ 512);
+ hardware_config_.UpdateOutputConfig(out_params);
+
AudioRendererImpl* audio_renderer_impl = new AudioRendererImpl(
message_loop_.message_loop_proxy(),
(clockless_playback_)
@@ -274,7 +282,8 @@ PipelineIntegrationTestBase::CreateFilterCollection(
audio_decoders.Pass(),
base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
base::Unretained(this),
- decryptor));
+ decryptor),
+ &hardware_config_);
// Disable underflow if hashing is enabled.
if (hashing_enabled_) {
audio_sink_->StartAudioHashForTesting();
diff --git a/media/filters/pipeline_integration_test_base.h b/media/filters/pipeline_integration_test_base.h
index 39d2e2c..25a5924 100644
--- a/media/filters/pipeline_integration_test_base.h
+++ b/media/filters/pipeline_integration_test_base.h
@@ -9,6 +9,7 @@
#include "base/message_loop/message_loop.h"
#include "media/audio/clockless_audio_sink.h"
#include "media/audio/null_audio_sink.h"
+#include "media/base/audio_hardware_config.h"
#include "media/base/demuxer.h"
#include "media/base/filter_collection.h"
#include "media/base/media_keys.h"
@@ -109,6 +110,7 @@ class PipelineIntegrationTestBase {
Demuxer::NeedKeyCB need_key_cb_;
VideoFrame::Format last_video_frame_format_;
DummyTickClock dummy_clock_;
+ AudioHardwareConfig hardware_config_;
void OnStatusCallbackChecked(PipelineStatus expected_status,
PipelineStatus status);
diff --git a/media/filters/source_buffer_stream.cc b/media/filters/source_buffer_stream.cc
index fdaa63b..89281d6 100644
--- a/media/filters/source_buffer_stream.cc
+++ b/media/filters/source_buffer_stream.cc
@@ -1352,21 +1352,6 @@ bool SourceBufferStream::UpdateAudioConfig(const AudioDecoderConfig& config) {
return false;
}
- if (audio_configs_[0].samples_per_second() != config.samples_per_second()) {
- MEDIA_LOG(log_cb_) << "Audio sample rate changes not allowed.";
- return false;
- }
-
- if (audio_configs_[0].channel_layout() != config.channel_layout()) {
- MEDIA_LOG(log_cb_) << "Audio channel layout changes not allowed.";
- return false;
- }
-
- if (audio_configs_[0].bits_per_channel() != config.bits_per_channel()) {
- MEDIA_LOG(log_cb_) << "Audio bits per channel changes not allowed.";
- return false;
- }
-
if (audio_configs_[0].is_encrypted() != config.is_encrypted()) {
MEDIA_LOG(log_cb_) << "Audio encryption changes not allowed.";
return false;
diff --git a/media/media.gyp b/media/media.gyp
index 52faec9..8c1be6d 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -210,6 +210,8 @@
'base/audio_buffer_queue.cc',
'base/audio_buffer_queue.h',
'base/audio_capturer_source.h',
+ 'base/audio_buffer_converter.cc',
+ 'base/audio_buffer_converter.h',
'base/audio_converter.cc',
'base/audio_converter.h',
'base/audio_decoder.cc',
@@ -950,6 +952,7 @@
'base/android/media_codec_bridge_unittest.cc',
'base/android/media_drm_bridge_unittest.cc',
'base/android/media_source_player_unittest.cc',
+ 'base/audio_buffer_converter_unittest.cc',
'base/audio_buffer_unittest.cc',
'base/audio_buffer_queue_unittest.cc',
'base/audio_bus_unittest.cc',
diff --git a/media/tools/player_x11/player_x11.cc b/media/tools/player_x11/player_x11.cc
index 26834ea..0cec7b82 100644
--- a/media/tools/player_x11/player_x11.cc
+++ b/media/tools/player_x11/player_x11.cc
@@ -16,6 +16,7 @@
#include "base/threading/thread.h"
#include "media/audio/audio_manager.h"
#include "media/audio/null_audio_sink.h"
+#include "media/base/audio_hardware_config.h"
#include "media/base/decryptor.h"
#include "media/base/filter_collection.h"
#include "media/base/media.h"
@@ -137,11 +138,21 @@ void InitPipeline(
ScopedVector<media::AudioDecoder> audio_decoders;
audio_decoders.push_back(new media::FFmpegAudioDecoder(task_runner));
- scoped_ptr<media::AudioRenderer> audio_renderer(new media::AudioRendererImpl(
- task_runner,
- new media::NullAudioSink(task_runner),
- audio_decoders.Pass(),
- media::SetDecryptorReadyCB()));
+ media::AudioParameters out_params(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_STEREO,
+ 44100,
+ 16,
+ 512);
+ media::AudioHardwareConfig hardware_config(out_params, out_params);
+
+ scoped_ptr<media::AudioRenderer> audio_renderer(
+ new media::AudioRendererImpl(task_runner,
+ new media::NullAudioSink(task_runner),
+ audio_decoders.Pass(),
+ media::SetDecryptorReadyCB(),
+ &hardware_config));
+
collection->SetAudioRenderer(audio_renderer.Pass());
base::WaitableEvent event(true, false);