author     dalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-11-21 04:24:19 +0000
committer  dalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-11-21 04:24:19 +0000
commit     4187ea43e11dd33ec2d542edd76163d8fc7ed1c1 (patch)
tree       142e57255a31a095905e78423279f1fa2f4eb42e /media/base
parent     2d115a11ba119dc8d3df465b3fdb96fb94a0e1bc (diff)
Collapse AudioRendererMixer and OnMoreDataResampler into AudioConverter.

Currently we have roughly equivalent functionality in two places, and the CloudView project will add a third. As such, there's a need for a single class which can handle mixing, resampling, and general conversion from one set of AudioParameters to another.

This change introduces the AudioConverter object, which collapses the key functionality from AudioRendererMixer and OnMoreDataResampler into a single class that can do everything and is oblivious to the peculiarities of RenderCallback vs AudioSourceCallback. It also adds frame-delay reporting to the AudioPullFifo and MultiChannelResampler read callbacks so that buffer delay can be measured accurately without resorting to counting input vs output bytes.

Since the bulk of AudioRendererMixer's functionality moves into the new AudioConverter, playback state, volume, and delay handling move into the AudioRendererMixerInput class as well.

On my Z600, benchmarking 50000 iterations:
Convert() w/ FIFO took 7030.11ms.
Convert() w/o FIFO took 5218.83ms.

BUG=none
TEST=AudioConverter* unittests.
TBR=sergeyu

Review URL: https://chromiumcodereview.appspot.com/11410012

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@168976 0039d316-1c4b-4281-b951-d872f2087c98
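(Editorial note, not part of the patch.) A minimal sketch of how the new AudioConverter is wired up, assuming only the API added in this change; it reuses the FakeAudioRenderCallback test helper from this CL as an input source, and the parameter values mirror the benchmark test.

// Sketch only: minimal AudioConverter usage under the API added here.
#include "base/memory/scoped_ptr.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_converter.h"
#include "media/base/fake_audio_render_callback.h"

namespace media {

void ConvertOnce() {
  // Mono 48 kHz, 2048-frame input converted to stereo 44.1 kHz, 440 frames.
  AudioParameters input_params(
      AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO, 48000, 16, 2048);
  AudioParameters output_params(
      AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO, 44100, 16, 440);

  // false == keep the internal FIFO, so inputs always see 2048-frame requests.
  AudioConverter converter(input_params, output_params, false);

  FakeAudioRenderCallback input(0.2);
  converter.AddInput(&input);

  scoped_ptr<AudioBus> output_bus = AudioBus::Create(output_params);
  converter.Convert(output_bus.get());  // Mixes, resamples, and rebuffers.

  converter.RemoveInput(&input);
}

}  // namespace media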
Diffstat (limited to 'media/base')
-rw-r--r--media/base/audio_converter.cc213
-rw-r--r--media/base/audio_converter.h108
-rw-r--r--media/base/audio_converter_unittest.cc286
-rw-r--r--media/base/audio_pull_fifo.cc2
-rw-r--r--media/base/audio_pull_fifo.h5
-rw-r--r--media/base/audio_pull_fifo_unittest.cc10
-rw-r--r--media/base/audio_renderer_mixer.cc91
-rw-r--r--media/base/audio_renderer_mixer.h29
-rw-r--r--media/base/audio_renderer_mixer_input.cc26
-rw-r--r--media/base/audio_renderer_mixer_input.h24
-rw-r--r--media/base/audio_renderer_mixer_input_unittest.cc44
-rw-r--r--media/base/audio_renderer_mixer_unittest.cc52
-rw-r--r--media/base/fake_audio_render_callback.cc13
-rw-r--r--media/base/fake_audio_render_callback.h17
-rw-r--r--media/base/multi_channel_resampler.cc15
-rw-r--r--media/base/multi_channel_resampler.h11
-rw-r--r--media/base/multi_channel_resampler_unittest.cc22
17 files changed, 774 insertions, 194 deletions
diff --git a/media/base/audio_converter.cc b/media/base/audio_converter.cc
new file mode 100644
index 0000000..1b66b03
--- /dev/null
+++ b/media/base/audio_converter.cc
@@ -0,0 +1,213 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_converter.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "media/base/audio_pull_fifo.h"
+#include "media/base/channel_mixer.h"
+#include "media/base/multi_channel_resampler.h"
+#include "media/base/vector_math.h"
+
+namespace media {
+
+AudioConverter::AudioConverter(const AudioParameters& input_params,
+ const AudioParameters& output_params,
+ bool disable_fifo)
+ : downmix_early_(false),
+ resampler_frame_delay_(0),
+ input_channel_count_(input_params.channels()) {
+ CHECK(input_params.IsValid());
+ CHECK(output_params.IsValid());
+
+ // Handle different input and output channel layouts.
+ if (input_params.channel_layout() != output_params.channel_layout()) {
+ DVLOG(1) << "Remixing channel layout from " << input_params.channel_layout()
+ << " to " << output_params.channel_layout() << "; from "
+ << input_params.channels() << " channels to "
+ << output_params.channels() << " channels.";
+ channel_mixer_.reset(new ChannelMixer(
+ input_params.channel_layout(), output_params.channel_layout()));
+
+ // Pare off data as early as we can for efficiency.
+ downmix_early_ = input_params.channels() > output_params.channels();
+ if (downmix_early_) {
+ DVLOG(1) << "Remixing channel layout prior to resampling.";
+ // |unmixed_audio_| will be allocated on the fly.
+ } else {
+ // Instead, if we're not downmixing early we need a temporary AudioBus
+ // which matches the input channel count but uses the output frame size
+ // since we'll mix into the AudioBus from the output stream.
+ unmixed_audio_ = AudioBus::Create(
+ input_params.channels(), output_params.frames_per_buffer());
+ }
+ }
+
+ // Only resample if necessary since it's expensive.
+ if (input_params.sample_rate() != output_params.sample_rate()) {
+ DVLOG(1) << "Resampling from " << input_params.sample_rate() << " to "
+ << output_params.sample_rate();
+ double io_sample_rate_ratio = input_params.sample_rate() /
+ static_cast<double>(output_params.sample_rate());
+ resampler_.reset(new MultiChannelResampler(
+ downmix_early_ ? output_params.channels() :
+ input_params.channels(),
+ io_sample_rate_ratio, base::Bind(
+ &AudioConverter::ProvideInput, base::Unretained(this))));
+ }
+
+ input_frame_duration_ = base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond /
+ static_cast<double>(input_params.sample_rate()));
+ output_frame_duration_ = base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond /
+ static_cast<double>(output_params.sample_rate()));
+
+ if (disable_fifo)
+ return;
+
+ // Since the resampler / output device may want a different buffer size than
+ // the caller asked for, we need to use a FIFO to ensure that both sides
+ // read in chunk sizes they're configured for.
+ if (resampler_.get() ||
+ input_params.frames_per_buffer() != output_params.frames_per_buffer()) {
+ DVLOG(1) << "Rebuffering from " << input_params.frames_per_buffer()
+ << " to " << output_params.frames_per_buffer();
+ audio_fifo_.reset(new AudioPullFifo(
+ downmix_early_ ? output_params.channels() :
+ input_params.channels(),
+ input_params.frames_per_buffer(), base::Bind(
+ &AudioConverter::SourceCallback,
+ base::Unretained(this))));
+ }
+}
+
+AudioConverter::~AudioConverter() {}
+
+void AudioConverter::AddInput(InputCallback* input) {
+ transform_inputs_.push_back(input);
+}
+
+void AudioConverter::RemoveInput(InputCallback* input) {
+ DCHECK(std::find(transform_inputs_.begin(), transform_inputs_.end(), input) !=
+ transform_inputs_.end());
+ transform_inputs_.remove(input);
+
+ if (transform_inputs_.empty())
+ Reset();
+}
+
+void AudioConverter::Reset() {
+ if (audio_fifo_)
+ audio_fifo_->Clear();
+ if (resampler_)
+ resampler_->Flush();
+}
+
+void AudioConverter::Convert(AudioBus* dest) {
+ if (transform_inputs_.empty()) {
+ dest->Zero();
+ return;
+ }
+
+ bool needs_mixing = channel_mixer_ && !downmix_early_;
+ AudioBus* temp_dest = needs_mixing ? unmixed_audio_.get() : dest;
+ DCHECK(temp_dest);
+
+ if (!resampler_ && !audio_fifo_) {
+ SourceCallback(0, temp_dest);
+ } else {
+ if (resampler_)
+ resampler_->Resample(temp_dest, temp_dest->frames());
+ else
+ ProvideInput(0, temp_dest);
+ }
+
+ if (needs_mixing) {
+ DCHECK_EQ(temp_dest->frames(), dest->frames());
+ channel_mixer_->Transform(temp_dest, dest);
+ }
+}
+
+void AudioConverter::SourceCallback(int fifo_frame_delay, AudioBus* dest) {
+ bool needs_downmix = channel_mixer_ && downmix_early_;
+
+ if (!mixer_input_audio_bus_ ||
+ mixer_input_audio_bus_->frames() != dest->frames()) {
+ mixer_input_audio_bus_ =
+ AudioBus::Create(input_channel_count_, dest->frames());
+ }
+
+ if (needs_downmix &&
+ (!unmixed_audio_ || unmixed_audio_->frames() != dest->frames())) {
+ // If we're downmixing early we need a temporary AudioBus which matches
+ // the input channel count and input frame size, since the inputs are
+ // mixed into |unmixed_audio_| before it is downmixed into |dest|.
+ unmixed_audio_ = AudioBus::Create(input_channel_count_, dest->frames());
+ }
+
+ AudioBus* temp_dest = needs_downmix ? unmixed_audio_.get() : dest;
+
+ // Sanity check our inputs.
+ DCHECK_EQ(temp_dest->frames(), mixer_input_audio_bus_->frames());
+ DCHECK_EQ(temp_dest->channels(), mixer_input_audio_bus_->channels());
+
+ // Calculate the buffer delay for this callback.
+ base::TimeDelta buffer_delay;
+ if (resampler_) {
+ buffer_delay += base::TimeDelta::FromMicroseconds(
+ resampler_frame_delay_ * output_frame_duration_.InMicroseconds());
+ }
+ if (audio_fifo_) {
+ buffer_delay += base::TimeDelta::FromMicroseconds(
+ fifo_frame_delay * input_frame_duration_.InMicroseconds());
+ }
+
+ // Have each mixer render its data into an output buffer then mix the result.
+ for (InputCallbackSet::iterator it = transform_inputs_.begin();
+ it != transform_inputs_.end(); ++it) {
+ InputCallback* input = *it;
+
+ float volume = input->ProvideInput(
+ mixer_input_audio_bus_.get(), buffer_delay);
+
+ // Optimize the most common single input, full volume case.
+ if (it == transform_inputs_.begin()) {
+ if (volume == 1.0f) {
+ mixer_input_audio_bus_->CopyTo(temp_dest);
+ continue;
+ }
+
+ // Zero |temp_dest| otherwise, so we're mixing into a clean buffer.
+ temp_dest->Zero();
+ }
+
+ // Volume adjust and mix each mixer input into |temp_dest| after rendering.
+ if (volume > 0) {
+ for (int i = 0; i < mixer_input_audio_bus_->channels(); ++i) {
+ vector_math::FMAC(
+ mixer_input_audio_bus_->channel(i), volume,
+ mixer_input_audio_bus_->frames(), temp_dest->channel(i));
+ }
+ }
+ }
+
+ if (needs_downmix) {
+ DCHECK_EQ(temp_dest->frames(), dest->frames());
+ channel_mixer_->Transform(temp_dest, dest);
+ }
+}
+
+void AudioConverter::ProvideInput(int resampler_frame_delay, AudioBus* dest) {
+ resampler_frame_delay_ = resampler_frame_delay;
+ if (audio_fifo_)
+ audio_fifo_->Consume(dest, dest->frames());
+ else
+ SourceCallback(0, dest);
+}
+
+} // namespace media
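(Editorial note, not part of the patch.) The delay accounting in SourceCallback() above sums two differently-scaled quantities: the resampler delay is counted in output frames, the FIFO delay in input frames. A small sketch with made-up frame counts, mirroring that math:

// Sketch only: the buffer delay math from AudioConverter::SourceCallback().
#include "base/time.h"

base::TimeDelta ExampleBufferDelay() {
  const base::TimeDelta output_frame_duration =
      base::TimeDelta::FromMicroseconds(
          base::Time::kMicrosecondsPerSecond / 44100.0);  // ~22 us per frame.
  const base::TimeDelta input_frame_duration =
      base::TimeDelta::FromMicroseconds(
          base::Time::kMicrosecondsPerSecond / 48000.0);  // ~20 us per frame.

  const int resampler_frame_delay = 512;  // Output frames already rendered.
  const int fifo_frame_delay = 2048;      // Input frames already buffered.

  // Each delay is scaled by its own frame duration before being summed.
  base::TimeDelta buffer_delay;
  buffer_delay += base::TimeDelta::FromMicroseconds(
      resampler_frame_delay * output_frame_duration.InMicroseconds());
  buffer_delay += base::TimeDelta::FromMicroseconds(
      fifo_frame_delay * input_frame_duration.InMicroseconds());
  return buffer_delay;  // 512 * 22us + 2048 * 20us, roughly 52 ms.
}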
diff --git a/media/base/audio_converter.h b/media/base/audio_converter.h
new file mode 100644
index 0000000..2987b78
--- /dev/null
+++ b/media/base/audio_converter.h
@@ -0,0 +1,108 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_CONVERTER_H_
+#define MEDIA_BASE_AUDIO_CONVERTER_H_
+
+#include <list>
+
+#include "base/callback.h"
+#include "base/time.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBus;
+class AudioPullFifo;
+class ChannelMixer;
+class MultiChannelResampler;
+
+// AudioConverter is a complete mixing, resampling, buffering, and channel
+// mixing solution for converting data from one set of AudioParameters to
+// another. For efficiency, each piece is only invoked when necessary; e.g.,
+// the resampler is only used if the input and output sample rates differ.
+// Mixing and channel downmixing are done prior to resampling to maximize
+// efficiency.
+class MEDIA_EXPORT AudioConverter {
+ public:
+ class MEDIA_EXPORT InputCallback {
+ public:
+ // Method for providing more data into the converter. Expects |audio_bus|
+ // to be completely filled with data upon return; zero padded if not enough
+ // frames are available to satisfy the request. The return value is the
+ // volume level of the provided audio data. If a volume level of zero is
+ // returned no further processing will be done on the provided data, else
+ // the volume level will be used to scale the provided audio data.
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) = 0;
+
+ protected:
+ virtual ~InputCallback() {}
+ };
+
+ // Construct an AudioConverter for converting between the given input and
+ // output parameters. Specifying |disable_fifo| means all InputCallbacks are
+ // capable of handling arbitrary buffer size requests; i.e. one call might ask
+ // for 10 frames of data (indicated by the size of AudioBus provided) and the
+ // next might ask for 20. In synthetic testing, disabling the FIFO yields a
+ // ~20% speed up for common cases.
+ AudioConverter(const AudioParameters& input_params,
+ const AudioParameters& output_params,
+ bool disable_fifo);
+ ~AudioConverter();
+
+ // Converts audio from all inputs into the |dest|. |dest| must be sized for
+ // data matching the output AudioParameters provided during construction.
+ void Convert(AudioBus* dest);
+
+ // Add or remove an input from the converter.
+ void AddInput(InputCallback* input);
+ void RemoveInput(InputCallback* input);
+
+ // Flush all buffered data. Automatically called when all inputs are removed.
+ void Reset();
+
+ private:
+ // Called by MultiChannelResampler when more data is necessary.
+ void ProvideInput(int resampler_frame_delay, AudioBus* audio_bus);
+
+ // Called by AudioPullFifo when more data is necessary.
+ void SourceCallback(int fifo_frame_delay, AudioBus* audio_bus);
+
+ // Set of inputs for Convert().
+ typedef std::list<InputCallback*> InputCallbackSet;
+ InputCallbackSet transform_inputs_;
+
+ // Used to buffer data between the client and the output device in cases where
+ // the client buffer size is not the same as the output device buffer size.
+ scoped_ptr<AudioPullFifo> audio_fifo_;
+
+ // Handles resampling.
+ scoped_ptr<MultiChannelResampler> resampler_;
+
+ // Handles channel transforms. |unmixed_audio_| is a temporary destination
+ // for audio data before it goes into the channel mixer.
+ scoped_ptr<ChannelMixer> channel_mixer_;
+ scoped_ptr<AudioBus> unmixed_audio_;
+
+ // Temporary AudioBus destination for mixing inputs.
+ scoped_ptr<AudioBus> mixer_input_audio_bus_;
+
+ // Since resampling is expensive, figure out if we should downmix channels
+ // before resampling.
+ bool downmix_early_;
+
+ // Used to calculate buffer delay information for InputCallbacks.
+ base::TimeDelta input_frame_duration_;
+ base::TimeDelta output_frame_duration_;
+ int resampler_frame_delay_;
+
+ const int input_channel_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioConverter);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_CONVERTER_H_
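(Editorial note, not part of the patch.) A minimal sketch of an InputCallback honoring the contract documented above: fill the bus, zero-pad any unrendered tail, and return the volume, where zero tells the converter to skip mixing this input. The class and its members are illustrative only.

// Sketch only: a hypothetical AudioConverter::InputCallback.
#include <algorithm>

#include "media/base/audio_bus.h"
#include "media/base/audio_converter.h"

namespace media {

class ConstantToneInput : public AudioConverter::InputCallback {
 public:
  ConstantToneInput() : volume_(1.0), muted_(false) {}

  virtual double ProvideInput(AudioBus* audio_bus,
                              base::TimeDelta buffer_delay) OVERRIDE {
    if (muted_)
      return 0;  // Zero volume: the converter skips this input entirely.

    // Pretend only half of the request could be rendered...
    int frames_filled = audio_bus->frames() / 2;
    for (int ch = 0; ch < audio_bus->channels(); ++ch) {
      std::fill(audio_bus->channel(ch),
                audio_bus->channel(ch) + frames_filled, 0.5f);
    }
    // ...so the unfilled tail must be zeroed since the converter mixes the
    // whole bus.
    audio_bus->ZeroFramesPartial(frames_filled,
                                 audio_bus->frames() - frames_filled);
    return volume_;
  }

  void set_volume(double volume) { volume_ = volume; }
  void set_muted(bool muted) { muted_ = muted; }

 private:
  double volume_;
  bool muted_;
};

}  // namespace media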
diff --git a/media/base/audio_converter_unittest.cc b/media/base/audio_converter_unittest.cc
new file mode 100644
index 0000000..0861222
--- /dev/null
+++ b/media/base/audio_converter_unittest.cc
@@ -0,0 +1,286 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include <cmath>
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "base/string_number_conversions.h"
+#include "base/time.h"
+#include "media/base/audio_converter.h"
+#include "media/base/fake_audio_render_callback.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Command line switch for runtime adjustment of benchmark iterations.
+static const char kBenchmarkIterations[] = "audio-converter-iterations";
+static const int kDefaultIterations = 10;
+
+// Parameters which control the many input case tests.
+static const int kConvertInputs = 8;
+static const int kConvertCycles = 3;
+
+// Parameters used for testing.
+static const int kBitsPerChannel = 32;
+static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+static const int kHighLatencyBufferSize = 2048;
+static const int kLowLatencyBufferSize = 256;
+static const int kSampleRate = 48000;
+
+// Number of full sine wave cycles for each Render() call.
+static const int kSineCycles = 4;
+
+// Tuple of <input sampling rate, output sampling rate, epsilon>.
+typedef std::tr1::tuple<int, int, double> AudioConverterTestData;
+class AudioConverterTest
+ : public testing::TestWithParam<AudioConverterTestData> {
+ public:
+ AudioConverterTest()
+ : epsilon_(std::tr1::get<2>(GetParam())) {
+ // Create input and output parameters based on test parameters.
+ input_parameters_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout,
+ std::tr1::get<0>(GetParam()), kBitsPerChannel, kHighLatencyBufferSize);
+ output_parameters_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, kChannelLayout,
+ std::tr1::get<1>(GetParam()), 16, kLowLatencyBufferSize);
+
+ converter_.reset(new AudioConverter(
+ input_parameters_, output_parameters_, false));
+
+ audio_bus_ = AudioBus::Create(output_parameters_);
+ expected_audio_bus_ = AudioBus::Create(output_parameters_);
+
+ // Allocate one callback for generating expected results.
+ double step = kSineCycles / static_cast<double>(
+ output_parameters_.frames_per_buffer());
+ expected_callback_.reset(new FakeAudioRenderCallback(step));
+ }
+
+ void InitializeInputs(int count) {
+ // Setup FakeAudioRenderCallback step to compensate for resampling.
+ double scale_factor = input_parameters_.sample_rate() /
+ static_cast<double>(output_parameters_.sample_rate());
+ double step = kSineCycles / (scale_factor *
+ static_cast<double>(output_parameters_.frames_per_buffer()));
+
+ for (int i = 0; i < count; ++i) {
+ fake_callbacks_.push_back(new FakeAudioRenderCallback(step));
+ converter_->AddInput(fake_callbacks_[i]);
+ }
+ }
+
+ void Reset() {
+ converter_->Reset();
+ for (size_t i = 0; i < fake_callbacks_.size(); ++i)
+ fake_callbacks_[i]->reset();
+ expected_callback_->reset();
+ }
+
+ void SetVolume(float volume) {
+ for (size_t i = 0; i < fake_callbacks_.size(); ++i)
+ fake_callbacks_[i]->set_volume(volume);
+ }
+
+ bool ValidateAudioData(int index, int frames, float scale) {
+ for (int i = 0; i < audio_bus_->channels(); ++i) {
+ for (int j = index; j < frames; j++) {
+ double error = fabs(audio_bus_->channel(i)[j] -
+ expected_audio_bus_->channel(i)[j] * scale);
+ if (error > epsilon_) {
+ EXPECT_NEAR(expected_audio_bus_->channel(i)[j] * scale,
+ audio_bus_->channel(i)[j], epsilon_)
+ << " i=" << i << ", j=" << j;
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ bool RenderAndValidateAudioData(float scale) {
+ // Render actual audio data.
+ converter_->Convert(audio_bus_.get());
+
+ // Render expected audio data.
+ expected_callback_->Render(expected_audio_bus_.get(), 0);
+
+ return ValidateAudioData(0, audio_bus_->frames(), scale);
+ }
+
+ // Fill |audio_bus_| fully with |value|.
+ void FillAudioData(float value) {
+ for (int i = 0; i < audio_bus_->channels(); ++i) {
+ std::fill(audio_bus_->channel(i),
+ audio_bus_->channel(i) + audio_bus_->frames(), value);
+ }
+ }
+
+ // Verify output with a number of transform inputs.
+ void RunTest(int inputs) {
+ InitializeInputs(inputs);
+
+ SetVolume(0);
+ for (int i = 0; i < kConvertCycles; ++i)
+ ASSERT_TRUE(RenderAndValidateAudioData(0));
+
+ Reset();
+
+ // Set a different volume for each input and verify the results.
+ float total_scale = 0;
+ for (size_t i = 0; i < fake_callbacks_.size(); ++i) {
+ float volume = static_cast<float>(i) / fake_callbacks_.size();
+ total_scale += volume;
+ fake_callbacks_[i]->set_volume(volume);
+ }
+ for (int i = 0; i < kConvertCycles; ++i)
+ ASSERT_TRUE(RenderAndValidateAudioData(total_scale));
+
+ Reset();
+
+ // Remove every other input.
+ for (size_t i = 1; i < fake_callbacks_.size(); i += 2)
+ converter_->RemoveInput(fake_callbacks_[i]);
+
+ SetVolume(1);
+ float scale = inputs > 1 ? inputs / 2.0f : inputs;
+ for (int i = 0; i < kConvertCycles; ++i)
+ ASSERT_TRUE(RenderAndValidateAudioData(scale));
+ }
+
+ protected:
+ virtual ~AudioConverterTest() {}
+
+ scoped_ptr<AudioConverter> converter_;
+ AudioParameters input_parameters_;
+ AudioParameters output_parameters_;
+ scoped_ptr<AudioBus> audio_bus_;
+ scoped_ptr<AudioBus> expected_audio_bus_;
+ ScopedVector<FakeAudioRenderCallback> fake_callbacks_;
+ scoped_ptr<FakeAudioRenderCallback> expected_callback_;
+ double epsilon_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioConverterTest);
+};
+
+// Ensure the buffer delay provided by AudioConverter is accurate.
+TEST(AudioConverterTest, AudioDelay) {
+ // Choose input and output parameters such that the transform must make
+ // multiple calls to fill the buffer.
+ AudioParameters input_parameters = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate,
+ kBitsPerChannel, kLowLatencyBufferSize);
+ AudioParameters output_parameters = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate * 2,
+ kBitsPerChannel, kHighLatencyBufferSize);
+
+ AudioConverter converter(input_parameters, output_parameters, false);
+ FakeAudioRenderCallback callback(0.2);
+ scoped_ptr<AudioBus> audio_bus = AudioBus::Create(output_parameters);
+ converter.AddInput(&callback);
+ converter.Convert(audio_bus.get());
+
+ // Calculate the expected buffer delay for given AudioParameters.
+ double input_sample_rate = input_parameters.sample_rate();
+ int fill_count =
+ (output_parameters.frames_per_buffer() * input_sample_rate /
+ output_parameters.sample_rate()) / input_parameters.frames_per_buffer();
+
+ base::TimeDelta input_frame_duration = base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond / input_sample_rate);
+
+ int expected_last_delay_milliseconds =
+ fill_count * input_parameters.frames_per_buffer() *
+ input_frame_duration.InMillisecondsF();
+
+ EXPECT_EQ(expected_last_delay_milliseconds,
+ callback.last_audio_delay_milliseconds());
+}
+
+// Benchmark for audio conversion. Original benchmarks were run with
+// --audio-converter-iterations=50000.
+TEST(AudioConverterTest, ConvertBenchmark) {
+ int benchmark_iterations = kDefaultIterations;
+ std::string iterations(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ kBenchmarkIterations));
+ base::StringToInt(iterations, &benchmark_iterations);
+ if (benchmark_iterations < kDefaultIterations)
+ benchmark_iterations = kDefaultIterations;
+
+ // Create input and output parameters to convert between the two most common
+ // sets of parameters (as indicated via UMA data).
+ AudioParameters input_params(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO, 48000, 16, 2048);
+ AudioParameters output_params(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO, 44100, 16, 440);
+ scoped_ptr<AudioConverter> converter(
+ new AudioConverter(input_params, output_params, false));
+
+ scoped_ptr<AudioBus> output_bus = AudioBus::Create(output_params);
+ FakeAudioRenderCallback fake_input1(0.2);
+ FakeAudioRenderCallback fake_input2(0.4);
+ FakeAudioRenderCallback fake_input3(0.6);
+ converter->AddInput(&fake_input1);
+ converter->AddInput(&fake_input2);
+ converter->AddInput(&fake_input3);
+
+ printf("Benchmarking %d iterations:\n", benchmark_iterations);
+
+ // Benchmark Convert() w/ FIFO.
+ base::TimeTicks start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < benchmark_iterations; ++i) {
+ converter->Convert(output_bus.get());
+ }
+ double total_time_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("Convert() w/ FIFO took %.2fms.\n", total_time_ms);
+
+ converter.reset(new AudioConverter(input_params, output_params, true));
+ converter->AddInput(&fake_input1);
+ converter->AddInput(&fake_input2);
+ converter->AddInput(&fake_input3);
+
+ // Benchmark Convert() w/o FIFO.
+ start = base::TimeTicks::HighResNow();
+ for (int i = 0; i < benchmark_iterations; ++i) {
+ converter->Convert(output_bus.get());
+ }
+ total_time_ms =
+ (base::TimeTicks::HighResNow() - start).InMillisecondsF();
+ printf("Convert() w/o FIFO took %.2fms.\n", total_time_ms);
+}
+
+TEST_P(AudioConverterTest, NoInputs) {
+ FillAudioData(1.0f);
+ EXPECT_TRUE(RenderAndValidateAudioData(0.0f));
+}
+
+TEST_P(AudioConverterTest, OneInput) {
+ RunTest(1);
+}
+
+TEST_P(AudioConverterTest, ManyInputs) {
+ RunTest(kConvertInputs);
+}
+
+INSTANTIATE_TEST_CASE_P(
+ // TODO(dalecurtis): Add test cases for channel transforms.
+ AudioConverterTest, AudioConverterTest, testing::Values(
+ // No resampling.
+ std::tr1::make_tuple(44100, 44100, 0.00000048),
+
+ // Upsampling.
+ std::tr1::make_tuple(44100, 48000, 0.033),
+
+ // Downsampling.
+ std::tr1::make_tuple(48000, 41000, 0.042)));
+
+} // namespace media
diff --git a/media/base/audio_pull_fifo.cc b/media/base/audio_pull_fifo.cc
index b1622e0..4943591 100644
--- a/media/base/audio_pull_fifo.cc
+++ b/media/base/audio_pull_fifo.cc
@@ -33,7 +33,7 @@ void AudioPullFifo::Consume(AudioBus* destination, int frames_to_consume) {
// Get the remaining audio frames from the producer using the callback.
while (remaining_frames_to_provide > 0) {
// Fill up the FIFO by acquiring audio data from the producer.
- read_cb_.Run(bus_.get());
+ read_cb_.Run(write_pos, bus_.get());
fifo_->Push(bus_.get());
// Try to fulfill the request using what's available in the FIFO.
diff --git a/media/base/audio_pull_fifo.h b/media/base/audio_pull_fifo.h
index 9fd3a8e..caf73e4 100644
--- a/media/base/audio_pull_fifo.h
+++ b/media/base/audio_pull_fifo.h
@@ -20,8 +20,9 @@ class MEDIA_EXPORT AudioPullFifo {
public:
// Callback type for providing more data into the FIFO. Expects AudioBus
// to be completely filled with data upon return; zero padded if not enough
- // frames are available to satisfy the request.
- typedef base::Callback<void(AudioBus* audio_bus)> ReadCB;
+ // frames are available to satisfy the request. |frame_delay| is the number
+ // of output frames already processed and can be used to estimate delay.
+ typedef base::Callback<void(int frame_delay, AudioBus* audio_bus)> ReadCB;
// Constructs an AudioPullFifo with the specified |read_cb|, which is used to
// read audio data to the FIFO if data is not already available. The internal
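(Editorial note, not part of the patch.) A minimal sketch of a client adapting to the new two-argument ReadCB; |frame_delay| reports how many output frames the current Consume() call has already satisfied, which a real client could scale by the frame duration to estimate latency. The class name is illustrative only.

// Sketch only: a hypothetical client of the updated AudioPullFifo::ReadCB.
#include "base/bind.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_pull_fifo.h"

namespace media {

class FifoClient {
 public:
  FifoClient(int channels, int frames_per_buffer)
      : fifo_(channels, frames_per_buffer,
              base::Bind(&FifoClient::OnFifoRead, base::Unretained(this))) {}

  // Pulls |dest->frames()| frames, refilling via OnFifoRead() as needed.
  void Pull(AudioBus* dest) { fifo_.Consume(dest, dest->frames()); }

 private:
  void OnFifoRead(int frame_delay, AudioBus* audio_bus) {
    // |frame_delay| output frames were already delivered during this
    // Consume() call; a real client would render audio here and fold the
    // delay into its timing estimate.
    audio_bus->Zero();
  }

  AudioPullFifo fifo_;
};

}  // namespace media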
diff --git a/media/base/audio_pull_fifo_unittest.cc b/media/base/audio_pull_fifo_unittest.cc
index e5c005d..cec4d35 100644
--- a/media/base/audio_pull_fifo_unittest.cc
+++ b/media/base/audio_pull_fifo_unittest.cc
@@ -29,7 +29,8 @@ class AudioPullFifoTest
: pull_fifo_(kChannels, kMaxFramesInFifo, base::Bind(
&AudioPullFifoTest::ProvideInput, base::Unretained(this))),
audio_bus_(AudioBus::Create(kChannels, kMaxFramesInFifo)),
- fill_value_(0) {}
+ fill_value_(0),
+ last_frame_delay_(-1) {}
virtual ~AudioPullFifoTest() {}
void VerifyValue(const float data[], int size, float start_value) {
@@ -51,12 +52,16 @@ class AudioPullFifoTest
VerifyValue(audio_bus_->channel(j), frames_to_consume, start_value);
}
start_value += frames_to_consume;
+ EXPECT_LT(last_frame_delay_, audio_bus_->frames());
}
// AudioPullFifo::ReadCB implementation where we increase a value for each
// audio frame that we provide. Note that all channels are given the same
// value to simplify the verification.
- virtual void ProvideInput(AudioBus* audio_bus) {
+ virtual void ProvideInput(int frame_delay, AudioBus* audio_bus) {
+ ASSERT_GT(frame_delay, last_frame_delay_);
+ last_frame_delay_ = frame_delay;
+
EXPECT_EQ(audio_bus->channels(), audio_bus_->channels());
EXPECT_EQ(audio_bus->frames(), kMaxFramesInFifo);
for (int i = 0; i < audio_bus->frames(); ++i) {
@@ -72,6 +77,7 @@ class AudioPullFifoTest
AudioPullFifo pull_fifo_;
scoped_ptr<AudioBus> audio_bus_;
int fill_value_;
+ int last_frame_delay_;
DISALLOW_COPY_AND_ASSIGN(AudioPullFifoTest);
};
diff --git a/media/base/audio_renderer_mixer.cc b/media/base/audio_renderer_mixer.cc
index 4df2eea..003f6dd 100644
--- a/media/base/audio_renderer_mixer.cc
+++ b/media/base/audio_renderer_mixer.cc
@@ -7,9 +7,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/logging.h"
-#include "media/audio/audio_util.h"
-#include "media/base/limits.h"
-#include "media/base/vector_math.h"
namespace media {
@@ -17,26 +14,7 @@ AudioRendererMixer::AudioRendererMixer(
const AudioParameters& input_params, const AudioParameters& output_params,
const scoped_refptr<AudioRendererSink>& sink)
: audio_sink_(sink),
- current_audio_delay_milliseconds_(0),
- io_ratio_(1),
- input_ms_per_frame_(
- static_cast<double>(base::Time::kMillisecondsPerSecond) /
- input_params.sample_rate()) {
- DCHECK(input_params.IsValid());
- DCHECK(output_params.IsValid());
-
- // Channel mixing is handled by the browser side currently.
- DCHECK_EQ(input_params.channels(), output_params.channels());
-
- // Only resample if necessary since it's expensive.
- if (input_params.sample_rate() != output_params.sample_rate()) {
- io_ratio_ = input_params.sample_rate() /
- static_cast<double>(output_params.sample_rate());
- resampler_.reset(new MultiChannelResampler(
- output_params.channels(), io_ratio_,
- base::Bind(&AudioRendererMixer::ProvideInput, base::Unretained(this))));
- }
-
+ audio_converter_(input_params, output_params, true) {
audio_sink_->Initialize(output_params, this);
audio_sink_->Start();
}
@@ -53,78 +31,29 @@ AudioRendererMixer::~AudioRendererMixer() {
void AudioRendererMixer::AddMixerInput(
const scoped_refptr<AudioRendererMixerInput>& input) {
base::AutoLock auto_lock(mixer_inputs_lock_);
- mixer_inputs_.insert(input);
+ mixer_inputs_.push_back(input);
+ audio_converter_.AddInput(input);
}
void AudioRendererMixer::RemoveMixerInput(
const scoped_refptr<AudioRendererMixerInput>& input) {
base::AutoLock auto_lock(mixer_inputs_lock_);
- mixer_inputs_.erase(input);
+ audio_converter_.RemoveInput(input);
+ mixer_inputs_.remove(input);
}
int AudioRendererMixer::Render(AudioBus* audio_bus,
int audio_delay_milliseconds) {
- current_audio_delay_milliseconds_ = audio_delay_milliseconds / io_ratio_;
-
- if (resampler_.get())
- resampler_->Resample(audio_bus, audio_bus->frames());
- else
- ProvideInput(audio_bus);
-
- // Always return the full number of frames requested, ProvideInput() will pad
- // with silence if it wasn't able to acquire enough data.
- return audio_bus->frames();
-}
-
-void AudioRendererMixer::ProvideInput(AudioBus* audio_bus) {
base::AutoLock auto_lock(mixer_inputs_lock_);
- // Allocate staging area for each mixer input's audio data on first call. We
- // won't know how much to allocate until here because of resampling. Ensure
- // our intermediate AudioBus is sized exactly as the original. Resize should
- // only happen once due to the way the resampler works.
- if (!mixer_input_audio_bus_.get() ||
- mixer_input_audio_bus_->frames() != audio_bus->frames()) {
- mixer_input_audio_bus_ =
- AudioBus::Create(audio_bus->channels(), audio_bus->frames());
- }
-
- // Sanity check our inputs.
- DCHECK_EQ(audio_bus->frames(), mixer_input_audio_bus_->frames());
- DCHECK_EQ(audio_bus->channels(), mixer_input_audio_bus_->channels());
-
- // Zero |audio_bus| so we're mixing into a clean buffer and return silence if
- // we couldn't get enough data from our inputs.
- audio_bus->Zero();
-
- // Have each mixer render its data into an output buffer then mix the result.
+ // Set the delay information for each mixer input.
for (AudioRendererMixerInputSet::iterator it = mixer_inputs_.begin();
it != mixer_inputs_.end(); ++it) {
- const scoped_refptr<AudioRendererMixerInput>& input = *it;
-
- double volume;
- input->GetVolume(&volume);
-
- // Nothing to do if the input isn't playing.
- if (!input->playing())
- continue;
-
- int frames_filled = input->callback()->Render(
- mixer_input_audio_bus_.get(), current_audio_delay_milliseconds_);
- if (frames_filled == 0)
- continue;
-
- // Volume adjust and mix each mixer input into |audio_bus| after rendering.
- for (int i = 0; i < audio_bus->channels(); ++i) {
- vector_math::FMAC(
- mixer_input_audio_bus_->channel(i), volume, frames_filled,
- audio_bus->channel(i));
- }
+ (*it)->set_audio_delay_milliseconds(audio_delay_milliseconds);
}
- // Update the delay estimate.
- current_audio_delay_milliseconds_ +=
- audio_bus->frames() * input_ms_per_frame_;
+ audio_converter_.Convert(audio_bus);
+ return audio_bus->frames();
}
void AudioRendererMixer::OnRenderError() {
@@ -133,7 +62,7 @@ void AudioRendererMixer::OnRenderError() {
// Call each mixer input and signal an error.
for (AudioRendererMixerInputSet::iterator it = mixer_inputs_.begin();
it != mixer_inputs_.end(); ++it) {
- (*it)->callback()->OnRenderError();
+ (*it)->OnRenderError();
}
}
diff --git a/media/base/audio_renderer_mixer.h b/media/base/audio_renderer_mixer.h
index 7bb85af..bac048d 100644
--- a/media/base/audio_renderer_mixer.h
+++ b/media/base/audio_renderer_mixer.h
@@ -5,20 +5,18 @@
#ifndef MEDIA_BASE_AUDIO_RENDERER_MIXER_H_
#define MEDIA_BASE_AUDIO_RENDERER_MIXER_H_
-#include <set>
+#include <list>
#include "base/synchronization/lock.h"
+#include "media/base/audio_converter.h"
#include "media/base/audio_renderer_mixer_input.h"
#include "media/base/audio_renderer_sink.h"
-#include "media/base/multi_channel_resampler.h"
namespace media {
// Mixes a set of AudioRendererMixerInputs into a single output stream which is
// funneled into a single shared AudioRendererSink; saving a bundle on renderer
-// side resources. Resampling is done post-mixing as it is the most expensive
-// process. If the input sample rate matches the audio hardware sample rate, no
-// resampling is done.
+// side resources.
class MEDIA_EXPORT AudioRendererMixer
: NON_EXPORTED_BASE(public AudioRendererSink::RenderCallback) {
public:
@@ -37,33 +35,18 @@ class MEDIA_EXPORT AudioRendererMixer
int audio_delay_milliseconds) OVERRIDE;
virtual void OnRenderError() OVERRIDE;
- // Handles mixing and volume adjustment. Fully fills |audio_bus| with mixed
- // audio data. When resampling is necessary, ProvideInput() will be called
- // by MultiChannelResampler when more data is necessary.
- void ProvideInput(AudioBus* audio_bus);
-
// Output sink for this mixer.
scoped_refptr<AudioRendererSink> audio_sink_;
// Set of mixer inputs to be mixed by this mixer. Access is thread-safe
// through |mixer_inputs_lock_|.
- typedef std::set< scoped_refptr<AudioRendererMixerInput> >
+ typedef std::list<scoped_refptr<AudioRendererMixerInput> >
AudioRendererMixerInputSet;
AudioRendererMixerInputSet mixer_inputs_;
base::Lock mixer_inputs_lock_;
- // Vector for rendering audio data from each mixer input.
- scoped_ptr<AudioBus> mixer_input_audio_bus_;
-
- // Handles resampling post-mixing.
- scoped_ptr<MultiChannelResampler> resampler_;
-
- // The audio delay in milliseconds received by the last Render() call.
- int current_audio_delay_milliseconds_;
-
- // Ratio of input data to output data. Used to scale audio delay information.
- double io_ratio_;
- double input_ms_per_frame_;
+ // Handles mixing and resampling between input and output parameters.
+ AudioConverter audio_converter_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererMixer);
};
diff --git a/media/base/audio_renderer_mixer_input.cc b/media/base/audio_renderer_mixer_input.cc
index c748f8f..763472a 100644
--- a/media/base/audio_renderer_mixer_input.cc
+++ b/media/base/audio_renderer_mixer_input.cc
@@ -18,7 +18,8 @@ AudioRendererMixerInput::AudioRendererMixerInput(
get_mixer_cb_(get_mixer_cb),
remove_mixer_cb_(remove_mixer_cb),
mixer_(NULL),
- callback_(NULL) {
+ callback_(NULL),
+ current_audio_delay_milliseconds_(0) {
}
AudioRendererMixerInput::~AudioRendererMixerInput() {
@@ -72,8 +73,27 @@ bool AudioRendererMixerInput::SetVolume(double volume) {
return true;
}
-void AudioRendererMixerInput::GetVolume(double* volume) {
- *volume = volume_;
+double AudioRendererMixerInput::ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) {
+ int frames_filled = 0;
+
+ if (playing_) {
+ frames_filled = callback_->Render(
+ audio_bus,
+ current_audio_delay_milliseconds_ + buffer_delay.InMilliseconds());
+
+ // AudioConverter expects unfilled frames to be zeroed.
+ if (frames_filled < audio_bus->frames()) {
+ audio_bus->ZeroFramesPartial(
+ frames_filled, audio_bus->frames() - frames_filled);
+ }
+ }
+
+ return frames_filled > 0 ? volume_ : 0;
+}
+
+void AudioRendererMixerInput::OnRenderError() {
+ callback_->OnRenderError();
}
} // namespace media
diff --git a/media/base/audio_renderer_mixer_input.h b/media/base/audio_renderer_mixer_input.h
index 486f5c2..023badd 100644
--- a/media/base/audio_renderer_mixer_input.h
+++ b/media/base/audio_renderer_mixer_input.h
@@ -8,6 +8,7 @@
#include <vector>
#include "base/callback.h"
+#include "media/base/audio_converter.h"
#include "media/base/audio_renderer_sink.h"
namespace media {
@@ -15,7 +16,8 @@ namespace media {
class AudioRendererMixer;
class MEDIA_EXPORT AudioRendererMixerInput
- : NON_EXPORTED_BASE(public AudioRendererSink) {
+ : NON_EXPORTED_BASE(public AudioRendererSink),
+ public AudioConverter::InputCallback {
public:
typedef base::Callback<AudioRendererMixer*(
const AudioParameters& params)> GetMixerCB;
@@ -24,9 +26,6 @@ class MEDIA_EXPORT AudioRendererMixerInput
AudioRendererMixerInput(
const GetMixerCB& get_mixer_cb, const RemoveMixerCB& remove_mixer_cb);
- AudioRendererSink::RenderCallback* callback() { return callback_; }
- bool playing() { return playing_; }
-
// AudioRendererSink implementation.
virtual void Start() OVERRIDE;
virtual void Stop() OVERRIDE;
@@ -36,17 +35,29 @@ class MEDIA_EXPORT AudioRendererMixerInput
virtual void Initialize(const AudioParameters& params,
AudioRendererSink::RenderCallback* renderer) OVERRIDE;
- void GetVolume(double* volume);
+ // Called by AudioRendererMixer when new delay information is available.
+ void set_audio_delay_milliseconds(int audio_delay_milliseconds) {
+ current_audio_delay_milliseconds_ = audio_delay_milliseconds;
+ }
+
+ // Called by AudioRendererMixer when an error occurs.
+ void OnRenderError();
protected:
virtual ~AudioRendererMixerInput();
private:
+ friend class AudioRendererMixerInputTest;
+
bool playing_;
bool initialized_;
bool started_;
double volume_;
+ // AudioConverter::InputCallback implementation.
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) OVERRIDE;
+
// Callbacks provided during construction which allow AudioRendererMixerInput
// to retrieve a mixer during Initialize() and notify when it's done with it.
GetMixerCB get_mixer_cb_;
@@ -62,6 +73,9 @@ class MEDIA_EXPORT AudioRendererMixerInput
// Source of audio data which is provided to the mixer.
AudioRendererSink::RenderCallback* callback_;
+ // The current audio delay as last provided by AudioRendererMixer.
+ int current_audio_delay_milliseconds_;
+
DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInput);
};
diff --git a/media/base/audio_renderer_mixer_input_unittest.cc b/media/base/audio_renderer_mixer_input_unittest.cc
index a3cb3bc..5b4aac2 100644
--- a/media/base/audio_renderer_mixer_input_unittest.cc
+++ b/media/base/audio_renderer_mixer_input_unittest.cc
@@ -29,6 +29,7 @@ class AudioRendererMixerInputTest : public testing::Test {
fake_callback_.reset(new FakeAudioRenderCallback(0));
mixer_input_->Initialize(audio_parameters_, fake_callback_.get());
EXPECT_CALL(*this, RemoveMixer(testing::_));
+ audio_bus_ = AudioBus::Create(audio_parameters_);
}
void CreateMixerInput() {
@@ -51,6 +52,14 @@ class AudioRendererMixerInputTest : public testing::Test {
return mixer_.get();
}
+ double ProvideInput() {
+ return mixer_input_->ProvideInput(audio_bus_.get(), base::TimeDelta());
+ }
+
+ int GetAudioDelayMilliseconds() {
+ return mixer_input_->current_audio_delay_milliseconds_;
+ }
+
MOCK_METHOD1(RemoveMixer, void(const AudioParameters&));
protected:
@@ -60,26 +69,25 @@ class AudioRendererMixerInputTest : public testing::Test {
scoped_ptr<AudioRendererMixer> mixer_;
scoped_refptr<AudioRendererMixerInput> mixer_input_;
scoped_ptr<FakeAudioRenderCallback> fake_callback_;
+ scoped_ptr<AudioBus> audio_bus_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInputTest);
};
-// Test callback() works as expected.
-TEST_F(AudioRendererMixerInputTest, GetCallback) {
- EXPECT_EQ(mixer_input_->callback(), fake_callback_.get());
-}
-
-// Test that getting and setting the volume work as expected.
+// Test that getting and setting the volume work as expected. The volume is
+// returned from ProvideInput() only when playing.
TEST_F(AudioRendererMixerInputTest, GetSetVolume) {
- // Starting volume should be 0.
- double volume = 1.0f;
- mixer_input_->GetVolume(&volume);
- EXPECT_EQ(volume, 1.0f);
+ mixer_input_->Start();
+ mixer_input_->Play();
+
+ // Starting volume should be 1.0.
+ EXPECT_DOUBLE_EQ(ProvideInput(), 1);
- const double kVolume = 0.5f;
+ const double kVolume = 0.5;
EXPECT_TRUE(mixer_input_->SetVolume(kVolume));
- mixer_input_->GetVolume(&volume);
- EXPECT_EQ(volume, kVolume);
+ EXPECT_DOUBLE_EQ(ProvideInput(), kVolume);
+
+ mixer_input_->Stop();
}
// Test Start()/Play()/Pause()/Stop()/playing() all work as expected. Also
@@ -87,15 +95,15 @@ TEST_F(AudioRendererMixerInputTest, GetSetVolume) {
// crashing; functional tests for these methods are in AudioRendererMixerTest.
TEST_F(AudioRendererMixerInputTest, StartPlayPauseStopPlaying) {
mixer_input_->Start();
- EXPECT_FALSE(mixer_input_->playing());
+ EXPECT_DOUBLE_EQ(ProvideInput(), 0);
mixer_input_->Play();
- EXPECT_TRUE(mixer_input_->playing());
+ EXPECT_DOUBLE_EQ(ProvideInput(), 1);
mixer_input_->Pause(false);
- EXPECT_FALSE(mixer_input_->playing());
+ EXPECT_DOUBLE_EQ(ProvideInput(), 0);
mixer_input_->Play();
- EXPECT_TRUE(mixer_input_->playing());
+ EXPECT_DOUBLE_EQ(ProvideInput(), 1);
mixer_input_->Stop();
- EXPECT_FALSE(mixer_input_->playing());
+ EXPECT_DOUBLE_EQ(ProvideInput(), 0);
}
// Test that Stop() can be called before Initialize() and Start().
diff --git a/media/base/audio_renderer_mixer_unittest.cc b/media/base/audio_renderer_mixer_unittest.cc
index aad2313..7315b85f 100644
--- a/media/base/audio_renderer_mixer_unittest.cc
+++ b/media/base/audio_renderer_mixer_unittest.cc
@@ -77,8 +77,8 @@ class AudioRendererMixerTest
fake_callbacks_.reserve(count);
// Setup FakeAudioRenderCallback step to compensate for resampling.
- double scale_factor = input_parameters_.sample_rate()
- / static_cast<double>(output_parameters_.sample_rate());
+ double scale_factor = input_parameters_.sample_rate() /
+ static_cast<double>(output_parameters_.sample_rate());
double step = kSineCycles / (scale_factor *
static_cast<double>(output_parameters_.frames_per_buffer()));
@@ -95,14 +95,14 @@ class AudioRendererMixerTest
EXPECT_CALL(*this, RemoveMixer(testing::_)).Times(count);
}
- bool ValidateAudioData(int index, int frames, float scale) {
+ bool ValidateAudioData(int index, int frames, float scale, double epsilon) {
for (int i = 0; i < audio_bus_->channels(); ++i) {
for (int j = index; j < frames; j++) {
double error = fabs(audio_bus_->channel(i)[j] -
expected_audio_bus_->channel(i)[j] * scale);
- if (error > epsilon_) {
+ if (error > epsilon) {
EXPECT_NEAR(expected_audio_bus_->channel(i)[j] * scale,
- audio_bus_->channel(i)[j], epsilon_)
+ audio_bus_->channel(i)[j], epsilon)
<< " i=" << i << ", j=" << j;
return false;
}
@@ -111,18 +111,15 @@ class AudioRendererMixerTest
return true;
}
- bool RenderAndValidateAudioData(float scale) {
- // Half fill won't be exactly half when resampling since the resampler
- // will have enough data to fill out more of the buffer based on its
- // internal buffer and kernel size. So special case some of the checks.
- bool resampling = input_parameters_.sample_rate()
- != output_parameters_.sample_rate();
+ bool ValidateAudioData(int index, int frames, float scale) {
+ return ValidateAudioData(index, frames, scale, epsilon_);
+ }
+ bool RenderAndValidateAudioData(float scale) {
if (half_fill_) {
for (size_t i = 0; i < fake_callbacks_.size(); ++i)
fake_callbacks_[i]->set_half_fill(true);
expected_callback_->set_half_fill(true);
- expected_audio_bus_->Zero();
}
// Render actual audio data.
@@ -134,13 +131,10 @@ class AudioRendererMixerTest
expected_callback_->Render(expected_audio_bus_.get(), 0);
if (half_fill_) {
- // Verify first half of audio data for both resampling and non-resampling.
- if (!ValidateAudioData(0, frames / 2, scale))
- return false;
- // Verify silence in the second half if we're not resampling.
- if (!resampling)
- return ValidateAudioData(frames / 2, frames, 0);
- return true;
+ // In this case, just verify that every frame was initialized; this will
+ // only fail under tooling such as valgrind.
+ return ValidateAudioData(
+ 0, frames, 0, std::numeric_limits<double>::max());
} else {
return ValidateAudioData(0, frames, scale);
}
@@ -388,26 +382,6 @@ TEST_P(AudioRendererMixerTest, OnRenderError) {
mixer_inputs_[i]->Stop();
}
-// Verify that audio delay information is scaled to the input parameters.
-TEST_P(AudioRendererMixerTest, DelayTest) {
- InitializeInputs(1);
- static const int kAudioDelayMilliseconds = 100;
- ASSERT_EQ(mixer_inputs_.size(), 1u);
-
- // Start the input and issue a single render callback.
- mixer_inputs_[0]->Start();
- mixer_inputs_[0]->Play();
- mixer_callback_->Render(audio_bus_.get(), kAudioDelayMilliseconds);
-
- // The input to output ratio should only include the sample rate difference.
- double io_ratio = input_parameters_.sample_rate() /
- static_cast<double>(output_parameters_.sample_rate());
-
- EXPECT_EQ(static_cast<int>(kAudioDelayMilliseconds / io_ratio),
- fake_callbacks_[0]->last_audio_delay_milliseconds());
- mixer_inputs_[0]->Stop();
-}
-
// Ensure constructing an AudioRendererMixerInput, but not initializing it does
// not call RemoveMixer().
TEST_P(AudioRendererMixerTest, NoInitialize) {
diff --git a/media/base/fake_audio_render_callback.cc b/media/base/fake_audio_render_callback.cc
index 65b6ac9..af55910 100644
--- a/media/base/fake_audio_render_callback.cc
+++ b/media/base/fake_audio_render_callback.cc
@@ -5,16 +5,17 @@
// MSVC++ requires this to be set before any other includes to get M_PI.
#define _USE_MATH_DEFINES
-#include "media/base/fake_audio_render_callback.h"
-
#include <cmath>
+#include "media/base/fake_audio_render_callback.h"
+
namespace media {
FakeAudioRenderCallback::FakeAudioRenderCallback(double step)
: half_fill_(false),
step_(step),
- last_audio_delay_milliseconds_(-1) {
+ last_audio_delay_milliseconds_(-1),
+ volume_(1) {
reset();
}
@@ -40,4 +41,10 @@ int FakeAudioRenderCallback::Render(AudioBus* audio_bus,
return number_of_frames;
}
+double FakeAudioRenderCallback::ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) {
+ Render(audio_bus, buffer_delay.InMilliseconds());
+ return volume_;
+}
+
} // namespace media
diff --git a/media/base/fake_audio_render_callback.h b/media/base/fake_audio_render_callback.h
index 760e39d..5318c99 100644
--- a/media/base/fake_audio_render_callback.h
+++ b/media/base/fake_audio_render_callback.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_
#define MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_
+#include "media/base/audio_converter.h"
#include "media/base/audio_renderer_sink.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -12,7 +13,10 @@ namespace media {
// Fake RenderCallback which will fill each request with a sine wave. Sine
// state is kept across callbacks. State can be reset to default via reset().
-class FakeAudioRenderCallback : public AudioRendererSink::RenderCallback {
+// Also provides the AudioConverter::InputCallback interface.
+class FakeAudioRenderCallback
+ : public AudioRendererSink::RenderCallback,
+ public AudioConverter::InputCallback {
public:
// The function used to fulfill Render() is f(x) = sin(2 * PI * x * |step|),
// where x = [|number_of_frames| * m, |number_of_frames| * (m + 1)] and m =
@@ -22,9 +26,14 @@ class FakeAudioRenderCallback : public AudioRendererSink::RenderCallback {
// Renders a sine wave into the provided audio data buffer. If |half_fill_|
// is set, will only fill half the buffer.
- int Render(AudioBus* audio_bus, int audio_delay_milliseconds) OVERRIDE;
+ virtual int Render(AudioBus* audio_bus,
+ int audio_delay_milliseconds) OVERRIDE;
MOCK_METHOD0(OnRenderError, void());
+ // AudioConverter::InputCallback implementation.
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) OVERRIDE;
+
// Toggles only filling half the requested amount during Render().
void set_half_fill(bool half_fill) { half_fill_ = half_fill; }
@@ -35,11 +44,15 @@ class FakeAudioRenderCallback : public AudioRendererSink::RenderCallback {
// no Render() call occurred.
int last_audio_delay_milliseconds() { return last_audio_delay_milliseconds_; }
+ // Set volume information used by ProvideInput().
+ void set_volume(double volume) { volume_ = volume; }
+
private:
bool half_fill_;
double x_;
double step_;
int last_audio_delay_milliseconds_;
+ double volume_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioRenderCallback);
};
diff --git a/media/base/multi_channel_resampler.cc b/media/base/multi_channel_resampler.cc
index b8df97d..a5cbf3ef 100644
--- a/media/base/multi_channel_resampler.cc
+++ b/media/base/multi_channel_resampler.cc
@@ -15,7 +15,8 @@ MultiChannelResampler::MultiChannelResampler(int channels,
double io_sample_rate_ratio,
const ReadCB& read_cb)
: last_frame_count_(0),
- read_cb_(read_cb) {
+ read_cb_(read_cb),
+ output_frames_ready_(0) {
// Allocate each channel's resampler.
resamplers_.reserve(channels);
for (int i = 0; i < channels; ++i) {
@@ -33,10 +34,10 @@ void MultiChannelResampler::Resample(AudioBus* audio_bus, int frames) {
// channel. To ensure this, we chunk the number of requested frames into
// SincResampler::ChunkSize() sized chunks. SincResampler guarantees it will
// only call ProvideInput() once when we resample this way.
- int frames_done = 0;
+ output_frames_ready_ = 0;
int chunk_size = resamplers_[0]->ChunkSize();
- while (frames_done < frames) {
- int frames_this_time = std::min(frames - frames_done, chunk_size);
+ while (output_frames_ready_ < frames) {
+ int frames_this_time = std::min(frames - output_frames_ready_, chunk_size);
// Resample each channel.
for (size_t i = 0; i < resamplers_.size(); ++i) {
@@ -49,10 +50,10 @@ void MultiChannelResampler::Resample(AudioBus* audio_bus, int frames) {
// since they all buffer in the same way and are processing the same
// number of frames.
resamplers_[i]->Resample(
- audio_bus->channel(i) + frames_done, frames_this_time);
+ audio_bus->channel(i) + output_frames_ready_, frames_this_time);
}
- frames_done += frames_this_time;
+ output_frames_ready_ += frames_this_time;
}
}
@@ -82,7 +83,7 @@ void MultiChannelResampler::ProvideInput(int channel, float* destination,
}
last_frame_count_ = frames;
- read_cb_.Run(wrapped_resampler_audio_bus_.get());
+ read_cb_.Run(output_frames_ready_, wrapped_resampler_audio_bus_.get());
} else {
// All channels must ask for the same amount. This should always be the
// case, but let's just make sure.
diff --git a/media/base/multi_channel_resampler.h b/media/base/multi_channel_resampler.h
index 748bb47..6dd565b 100644
--- a/media/base/multi_channel_resampler.h
+++ b/media/base/multi_channel_resampler.h
@@ -21,8 +21,9 @@ class MEDIA_EXPORT MultiChannelResampler {
public:
// Callback type for providing more data into the resampler. Expects AudioBus
// to be completely filled with data upon return; zero padded if not enough
- // frames are available to satisfy the request.
- typedef base::Callback<void(AudioBus* audio_bus)> ReadCB;
+ // frames are available to satisfy the request. |frame_delay| is the number
+ // of output frames already processed and can be used to estimate delay.
+ typedef base::Callback<void(int frame_delay, AudioBus* audio_bus)> ReadCB;
// Constructs a MultiChannelResampler with the specified |read_cb|, which is
// used to acquire audio data for resampling. |io_sample_rate_ratio| is the
@@ -56,6 +57,12 @@ class MEDIA_EXPORT MultiChannelResampler {
scoped_ptr<AudioBus> resampler_audio_bus_;
scoped_ptr<AudioBus> wrapped_resampler_audio_bus_;
std::vector<float*> resampler_audio_data_;
+
+ // The number of output frames that have successfully been processed during
+ // the current Resample() call.
+ int output_frames_ready_;
+
+ DISALLOW_COPY_AND_ASSIGN(MultiChannelResampler);
};
} // namespace media
diff --git a/media/base/multi_channel_resampler_unittest.cc b/media/base/multi_channel_resampler_unittest.cc
index 623c9ef..ad67550 100644
--- a/media/base/multi_channel_resampler_unittest.cc
+++ b/media/base/multi_channel_resampler_unittest.cc
@@ -37,7 +37,9 @@ static const double kHighLatencyMaxError = 0.04;
class MultiChannelResamplerTest
: public testing::TestWithParam<int> {
public:
- MultiChannelResamplerTest() {}
+ MultiChannelResamplerTest()
+ : last_frame_delay_(-1) {
+ }
virtual ~MultiChannelResamplerTest() {}
void InitializeAudioData(int channels, int frames) {
@@ -47,7 +49,10 @@ class MultiChannelResamplerTest
// MultiChannelResampler::MultiChannelAudioSourceProvider implementation, just
// fills the provided audio_data with |kFillValue|.
- virtual void ProvideInput(AudioBus* audio_bus) {
+ virtual void ProvideInput(int frame_delay, AudioBus* audio_bus) {
+ EXPECT_GT(frame_delay, last_frame_delay_);
+ last_frame_delay_ = frame_delay;
+
float fill_value = fill_junk_values_ ? (1 / kFillValue) : kFillValue;
EXPECT_EQ(audio_bus->channels(), audio_bus_->channels());
for (int i = 0; i < audio_bus->channels(); ++i)
@@ -58,15 +63,19 @@ class MultiChannelResamplerTest
void MultiChannelTest(int channels, int frames, double expected_max_rms_error,
double expected_max_error) {
InitializeAudioData(channels, frames);
- MultiChannelResampler resampler(
- channels, kScaleFactor, base::Bind(
- &MultiChannelResamplerTest::ProvideInput,
- base::Unretained(this)));
+ MultiChannelResampler resampler(channels, kScaleFactor, base::Bind(
+ &MultiChannelResamplerTest::ProvideInput, base::Unretained(this)));
+
// First prime the resampler with some junk data, so we can verify Flush().
fill_junk_values_ = true;
resampler.Resample(audio_bus_.get(), 1);
resampler.Flush();
fill_junk_values_ = false;
+
+ // The last frame delay should be strictly less than the total frame count.
+ EXPECT_LT(last_frame_delay_, audio_bus_->frames());
+ last_frame_delay_ = -1;
+
// If Flush() didn't work, the rest of the tests will fail.
resampler.Resample(audio_bus_.get(), frames);
TestValues(expected_max_rms_error, expected_max_error);
@@ -108,6 +117,7 @@ class MultiChannelResamplerTest
int frames_;
bool fill_junk_values_;
scoped_ptr<AudioBus> audio_bus_;
+ int last_frame_delay_;
DISALLOW_COPY_AND_ASSIGN(MultiChannelResamplerTest);
};