22 files changed, 861 insertions(+), 432 deletions(-)
diff --git a/media/audio/audio_output_resampler.cc b/media/audio/audio_output_resampler.cc index 75b703b..baad88a 100644 --- a/media/audio/audio_output_resampler.cc +++ b/media/audio/audio_output_resampler.cc @@ -16,20 +16,19 @@ #include "media/audio/audio_output_proxy.h" #include "media/audio/audio_util.h" #include "media/audio/sample_rates.h" -#include "media/base/audio_pull_fifo.h" -#include "media/base/channel_mixer.h" +#include "media/base/audio_converter.h" #include "media/base/limits.h" #include "media/base/media_switches.h" -#include "media/base/multi_channel_resampler.h" namespace media { -class OnMoreDataResampler : public AudioOutputStream::AudioSourceCallback { +class OnMoreDataConverter + : public AudioOutputStream::AudioSourceCallback, + public AudioConverter::InputCallback { public: - OnMoreDataResampler(double io_ratio, - const AudioParameters& input_params, + OnMoreDataConverter(const AudioParameters& input_params, const AudioParameters& output_params); - virtual ~OnMoreDataResampler(); + virtual ~OnMoreDataConverter(); // AudioSourceCallback interface. virtual int OnMoreData(AudioBus* dest, @@ -48,15 +47,9 @@ class OnMoreDataResampler : public AudioOutputStream::AudioSourceCallback { void Stop(); private: - // Called by MultiChannelResampler when more data is necessary. - void ProvideInput(AudioBus* audio_bus); - - // Called by AudioPullFifo when more data is necessary. Requires - // |source_lock_| to have been acquired. - void SourceCallback_Locked(AudioBus* audio_bus); - - // Passes through |source| to the |source_callback_| OnMoreIOData() call. - void SourceIOCallback_Locked(AudioBus* source, AudioBus* dest); + // AudioConverter::InputCallback implementation. + virtual double ProvideInput(AudioBus* audio_bus, + base::TimeDelta buffer_delay) OVERRIDE; // Ratio of input bytes to output bytes used to correct playback delay with // regard to buffering and resampling. @@ -66,36 +59,20 @@ class OnMoreDataResampler : public AudioOutputStream::AudioSourceCallback { base::Lock source_lock_; AudioOutputStream::AudioSourceCallback* source_callback_; + // |source| passed to OnMoreIOData() which should be passed downstream. + AudioBus* source_bus_; + // Last AudioBuffersState object received via OnMoreData(), used to correct // playback delay by ProvideInput() and passed on to |source_callback_|. AudioBuffersState current_buffers_state_; - // Total number of bytes (in terms of output parameters) stored in resampler - // or FIFO buffers which have not been sent to the audio device. - int outstanding_audio_bytes_; - - // Used to buffer data between the client and the output device in cases where - // the client buffer size is not the same as the output device buffer size. - // Bound to SourceCallback_Locked() so must only be used when |source_lock_| - // has already been acquired. - scoped_ptr<AudioPullFifo> audio_fifo_; - - // Handles resampling. - scoped_ptr<MultiChannelResampler> resampler_; + const int input_bytes_per_second_; - // Handles channel transforms. |unmixed_audio_| is a temporary destination - // for audio data before it goes into the channel mixer. - scoped_ptr<ChannelMixer> channel_mixer_; - scoped_ptr<AudioBus> unmixed_audio_; + // Handles resampling, buffering, and channel mixing between input and output + // parameters. + AudioConverter audio_converter_; - int output_bytes_per_frame_; - int input_bytes_per_frame_; - - // Since resampling is expensive, figure out if we should downmix channels - // before resampling. 
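Note: the hunk above turns OnMoreDataResampler into OnMoreDataConverter, whose only data-path hook is now AudioConverter::InputCallback::ProvideInput(). A minimal sketch of what such an input callback is expected to do, assuming only the interface shown in this patch (the ConstantSource name and constant fill are illustrative, not part of the change; FakeAudioRenderCallback plays the same role in the tests):

  // Illustrative AudioConverter::InputCallback (hypothetical ConstantSource,
  // not from this patch). ProvideInput() must fill |audio_bus| completely and
  // return the volume to mix at; returning 0 tells the converter to drop this
  // input's contribution.
  #include <algorithm>

  #include "base/time.h"
  #include "media/base/audio_bus.h"
  #include "media/base/audio_converter.h"

  class ConstantSource : public media::AudioConverter::InputCallback {
   public:
    ConstantSource(float value, double volume)
        : value_(value), volume_(volume) {}

    virtual double ProvideInput(media::AudioBus* audio_bus,
                                base::TimeDelta buffer_delay) OVERRIDE {
      // |buffer_delay| is how long this data will sit in the converter's
      // FIFO/resampler before reaching the output; a real source would fold
      // it into its own delay estimate, as OnMoreDataConverter does.
      for (int ch = 0; ch < audio_bus->channels(); ++ch) {
        std::fill(audio_bus->channel(ch),
                  audio_bus->channel(ch) + audio_bus->frames(), value_);
      }
      return volume_;
    }

   private:
    const float value_;
    const double volume_;
  };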
- bool downmix_early_; - - DISALLOW_COPY_AND_ASSIGN(OnMoreDataResampler); + DISALLOW_COPY_AND_ASSIGN(OnMoreDataConverter); }; // Record UMA statistics for hardware output configuration. @@ -170,7 +147,6 @@ AudioOutputResampler::AudioOutputResampler(AudioManager* audio_manager, const AudioParameters& output_params, const base::TimeDelta& close_delay) : AudioOutputDispatcher(audio_manager, input_params), - io_ratio_(1), close_delay_(close_delay), output_params_(output_params), streams_opened_(false) { @@ -191,37 +167,6 @@ AudioOutputResampler::~AudioOutputResampler() { void AudioOutputResampler::Initialize() { DCHECK(!streams_opened_); DCHECK(callbacks_.empty()); - - io_ratio_ = 1; - - // Only resample or rebuffer if the input parameters don't match the output - // parameters to avoid any unnecessary work. - if (params_.channels() != output_params_.channels() || - params_.sample_rate() != output_params_.sample_rate() || - params_.bits_per_sample() != output_params_.bits_per_sample() || - params_.frames_per_buffer() != output_params_.frames_per_buffer()) { - if (params_.sample_rate() != output_params_.sample_rate()) { - double io_sample_rate_ratio = params_.sample_rate() / - static_cast<double>(output_params_.sample_rate()); - // Include the I/O resampling ratio in our global I/O ratio. - io_ratio_ *= io_sample_rate_ratio; - } - - // Include bits per channel differences. - io_ratio_ *= static_cast<double>(params_.bits_per_sample()) / - output_params_.bits_per_sample(); - - // Include channel count differences. - io_ratio_ *= static_cast<double>(params_.channels()) / - output_params_.channels(); - - DVLOG(1) << "I/O ratio is " << io_ratio_; - } else { - DVLOG(1) << "Input and output params are the same; in pass-through mode."; - } - - // TODO(dalecurtis): All this code should be merged into AudioOutputMixer once - // we've stabilized the issues there. dispatcher_ = new AudioOutputDispatcherImpl( audio_manager_, output_params_, close_delay_); } @@ -274,11 +219,10 @@ bool AudioOutputResampler::StartStream( AudioOutputProxy* stream_proxy) { DCHECK_EQ(MessageLoop::current(), message_loop_); - OnMoreDataResampler* resampler_callback = NULL; + OnMoreDataConverter* resampler_callback = NULL; CallbackMap::iterator it = callbacks_.find(stream_proxy); if (it == callbacks_.end()) { - resampler_callback = new OnMoreDataResampler( - io_ratio_, params_, output_params_); + resampler_callback = new OnMoreDataConverter(params_, output_params_); callbacks_[stream_proxy] = resampler_callback; } else { resampler_callback = it->second; @@ -299,7 +243,7 @@ void AudioOutputResampler::StopStream(AudioOutputProxy* stream_proxy) { // Now that StopStream() has completed the underlying physical stream should // be stopped and no longer calling OnMoreData(), making it safe to Stop() the - // OnMoreDataResampler. + // OnMoreDataConverter. CallbackMap::iterator it = callbacks_.find(stream_proxy); if (it != callbacks_.end()) it->second->Stop(); @@ -310,7 +254,7 @@ void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) { dispatcher_->CloseStream(stream_proxy); // We assume that StopStream() is always called prior to CloseStream(), so - // that it is safe to delete the OnMoreDataResampler here. + // that it is safe to delete the OnMoreDataConverter here. 
CallbackMap::iterator it = callbacks_.find(stream_proxy); if (it != callbacks_.end()) { delete it->second; @@ -329,95 +273,43 @@ void AudioOutputResampler::Shutdown() { DCHECK(callbacks_.empty()); } -OnMoreDataResampler::OnMoreDataResampler( - double io_ratio, const AudioParameters& input_params, - const AudioParameters& output_params) - : io_ratio_(io_ratio), - source_callback_(NULL), - outstanding_audio_bytes_(0), - output_bytes_per_frame_(output_params.GetBytesPerFrame()), - input_bytes_per_frame_(input_params.GetBytesPerFrame()), - downmix_early_(false) { - // Handle different input and output channel layouts. - if (input_params.channel_layout() != output_params.channel_layout()) { - DVLOG(1) << "Remixing channel layout from " << input_params.channel_layout() - << " to " << output_params.channel_layout() << "; from " - << input_params.channels() << " channels to " - << output_params.channels() << " channels."; - channel_mixer_.reset(new ChannelMixer( - input_params.channel_layout(), output_params.channel_layout())); - - // Pare off data as early as we can for efficiency. - downmix_early_ = input_params.channels() > output_params.channels(); - if (downmix_early_) { - DVLOG(1) << "Remixing channel layout prior to resampling."; - // If we're downmixing early we need a temporary AudioBus which matches - // the the input channel count and input frame size since we're passing - // |unmixed_audio_| directly to the |source_callback_|. - unmixed_audio_ = AudioBus::Create(input_params); - } else { - // Instead, if we're not downmixing early we need a temporary AudioBus - // which matches the input channel count but uses the output frame size - // since we'll mix into the AudioBus from the output stream. - unmixed_audio_ = AudioBus::Create( - input_params.channels(), output_params.frames_per_buffer()); - } - } - - // Only resample if necessary since it's expensive. - if (input_params.sample_rate() != output_params.sample_rate()) { - DVLOG(1) << "Resampling from " << input_params.sample_rate() << " to " - << output_params.sample_rate(); - double io_sample_rate_ratio = input_params.sample_rate() / - static_cast<double>(output_params.sample_rate()); - resampler_.reset(new MultiChannelResampler( - downmix_early_ ? output_params.channels() : - input_params.channels(), - io_sample_rate_ratio, base::Bind( - &OnMoreDataResampler::ProvideInput, base::Unretained(this)))); - } - - // Since the resampler / output device may want a different buffer size than - // the caller asked for, we need to use a FIFO to ensure that both sides - // read in chunk sizes they're configured for. - if (input_params.sample_rate() != output_params.sample_rate() || - input_params.frames_per_buffer() != output_params.frames_per_buffer()) { - DVLOG(1) << "Rebuffering from " << input_params.frames_per_buffer() - << " to " << output_params.frames_per_buffer(); - audio_fifo_.reset(new AudioPullFifo( - downmix_early_ ? 
output_params.channels() : - input_params.channels(), - input_params.frames_per_buffer(), base::Bind( - &OnMoreDataResampler::SourceCallback_Locked, - base::Unretained(this)))); - } +OnMoreDataConverter::OnMoreDataConverter(const AudioParameters& input_params, + const AudioParameters& output_params) + : source_callback_(NULL), + source_bus_(NULL), + input_bytes_per_second_(input_params.GetBytesPerSecond()), + audio_converter_(input_params, output_params, false) { + io_ratio_ = + static_cast<double>(input_params.GetBytesPerSecond()) / + output_params.GetBytesPerSecond(); } -OnMoreDataResampler::~OnMoreDataResampler() {} +OnMoreDataConverter::~OnMoreDataConverter() {} -void OnMoreDataResampler::Start( +void OnMoreDataConverter::Start( AudioOutputStream::AudioSourceCallback* callback) { base::AutoLock auto_lock(source_lock_); DCHECK(!source_callback_); source_callback_ = callback; + + // While AudioConverter can handle multiple inputs, we're using it only with + // a single input currently. Eventually this may be the basis for a browser + // side mixer. + audio_converter_.AddInput(this); } -void OnMoreDataResampler::Stop() { +void OnMoreDataConverter::Stop() { base::AutoLock auto_lock(source_lock_); source_callback_ = NULL; - outstanding_audio_bytes_ = 0; - if (audio_fifo_) - audio_fifo_->Clear(); - if (resampler_) - resampler_->Flush(); + audio_converter_.RemoveInput(this); } -int OnMoreDataResampler::OnMoreData(AudioBus* dest, +int OnMoreDataConverter::OnMoreData(AudioBus* dest, AudioBuffersState buffers_state) { return OnMoreIOData(NULL, dest, buffers_state); } -int OnMoreDataResampler::OnMoreIOData(AudioBus* source, +int OnMoreDataConverter::OnMoreIOData(AudioBus* source, AudioBus* dest, AudioBuffersState buffers_state) { base::AutoLock auto_lock(source_lock_); @@ -427,91 +319,54 @@ int OnMoreDataResampler::OnMoreIOData(AudioBus* source, return dest->frames(); } + source_bus_ = source; current_buffers_state_ = buffers_state; + audio_converter_.Convert(dest); - bool needs_mixing = channel_mixer_ && !downmix_early_; - AudioBus* temp_dest = needs_mixing ? unmixed_audio_.get() : dest; - - if (!resampler_ && !audio_fifo_) { - // We have no internal buffers, so clear any outstanding audio data. - outstanding_audio_bytes_ = 0; - SourceIOCallback_Locked(source, temp_dest); - } else { - if (resampler_) - resampler_->Resample(temp_dest, temp_dest->frames()); - else - ProvideInput(temp_dest); - - // Calculate how much data is left in the internal FIFO and resampler. - outstanding_audio_bytes_ -= temp_dest->frames() * output_bytes_per_frame_; - } - - if (needs_mixing) { - DCHECK_EQ(temp_dest->frames(), dest->frames()); - channel_mixer_->Transform(temp_dest, dest); - } - - // Due to rounding errors while multiplying against |io_ratio_|, - // |outstanding_audio_bytes_| might (rarely) slip below zero. - if (outstanding_audio_bytes_ < 0) { - DLOG(ERROR) << "Outstanding audio bytes went negative! Value: " - << outstanding_audio_bytes_; - outstanding_audio_bytes_ = 0; - } - - // Always return the full number of frames requested, ProvideInput() will pad - // with silence if it wasn't able to acquire enough data. + // Always return the full number of frames requested, ProvideInput_Locked() + // will pad with silence if it wasn't able to acquire enough data. 
return dest->frames(); } -void OnMoreDataResampler::SourceCallback_Locked(AudioBus* dest) { - SourceIOCallback_Locked(NULL, dest); -} - -void OnMoreDataResampler::SourceIOCallback_Locked(AudioBus* source, - AudioBus* dest) { +double OnMoreDataConverter::ProvideInput(AudioBus* dest, + base::TimeDelta buffer_delay) { source_lock_.AssertAcquired(); - // Adjust playback delay to include the state of the internal buffers used by - // the resampler and/or the FIFO. Since the sample rate and bits per channel - // may be different, we need to scale this value appropriately. + // Adjust playback delay to include |buffer_delay|. + // TODO(dalecurtis): Stop passing bytes around, it doesn't make sense since + // AudioBus is just float data. Use TimeDelta instead. AudioBuffersState new_buffers_state; - new_buffers_state.pending_bytes = io_ratio_ * - (current_buffers_state_.total_bytes() + outstanding_audio_bytes_); + new_buffers_state.pending_bytes = + io_ratio_ * (current_buffers_state_.total_bytes() + + buffer_delay.InSecondsF() * input_bytes_per_second_); - bool needs_downmix = channel_mixer_ && downmix_early_; - AudioBus* temp_dest = needs_downmix ? unmixed_audio_.get() : dest; - - // Retrieve data from the original callback. Zero any unfilled frames. + // Retrieve data from the original callback. int frames = source_callback_->OnMoreIOData( - source, temp_dest, new_buffers_state); - if (frames < temp_dest->frames()) - temp_dest->ZeroFramesPartial(frames, temp_dest->frames() - frames); - - // Scale the number of frames we got back in terms of input bytes to output - // bytes accordingly. - outstanding_audio_bytes_ += - (temp_dest->frames() * input_bytes_per_frame_) / io_ratio_; - - if (needs_downmix) { - DCHECK_EQ(temp_dest->frames(), dest->frames()); - channel_mixer_->Transform(temp_dest, dest); - } -} + source_bus_, dest, new_buffers_state); + + // |source_bus_| should only be provided once. + // TODO(dalecurtis, crogers): This is not a complete fix. If ProvideInput() + // is called multiple times, we need to do something more clever here. + source_bus_ = NULL; -void OnMoreDataResampler::ProvideInput(AudioBus* audio_bus) { - audio_fifo_->Consume(audio_bus, audio_bus->frames()); + // Zero any unfilled frames if anything was filled, otherwise we'll just + // return a volume of zero and let AudioConverter drop the output. + if (frames > 0 && frames < dest->frames()) + dest->ZeroFramesPartial(frames, dest->frames() - frames); + + // TODO(dalecurtis): Return the correct volume here. + return frames > 0 ? 1 : 0; } -void OnMoreDataResampler::OnError(AudioOutputStream* stream, int code) { +void OnMoreDataConverter::OnError(AudioOutputStream* stream, int code) { base::AutoLock auto_lock(source_lock_); if (source_callback_) source_callback_->OnError(stream, code); } -void OnMoreDataResampler::WaitTillDataReady() { +void OnMoreDataConverter::WaitTillDataReady() { base::AutoLock auto_lock(source_lock_); - if (source_callback_ && !outstanding_audio_bytes_) + if (source_callback_) source_callback_->WaitTillDataReady(); } diff --git a/media/audio/audio_output_resampler.h b/media/audio/audio_output_resampler.h index 7643437..057cf34 100644 --- a/media/audio/audio_output_resampler.h +++ b/media/audio/audio_output_resampler.h @@ -17,31 +17,24 @@ namespace media { -class OnMoreDataResampler; +class OnMoreDataConverter; -// AudioOutputResampler is a browser-side resampling and rebuffering solution -// which ensures audio data is always output at given parameters. 
The rough -// flow is: Client -> [FIFO] -> [Resampler] -> Output Device. -// -// The FIFO and resampler are only used when necessary. To be clear: -// - The resampler is only used if the input and output sample rates differ. -// - The FIFO is only used if the input and output frame sizes differ or if -// the resampler is used. +// AudioOutputResampler is a browser-side resampling and buffering solution +// which ensures audio data is always output at given parameters. See the +// AudioConverter class for details on the conversion process. // // AOR works by intercepting the AudioSourceCallback provided to StartStream() -// and redirecting to the appropriate resampling or FIFO callback which passes -// through to the original callback only when necessary. +// and redirecting it through an AudioConverter instance. AudioBuffersState is +// adjusted for buffer delay caused by the conversion process. // // AOR will automatically fall back from AUDIO_PCM_LOW_LATENCY to // AUDIO_PCM_LINEAR if the output device fails to open at the requested output // parameters. +// // TODO(dalecurtis): Ideally the low latency path will be as reliable as the // high latency path once we have channel mixing and support querying for the // hardware's configured bit depth. Monitor the UMA stats for fallback and // remove fallback support once it's stable. http://crbug.com/148418 -// -// Currently channel downmixing and upmixing is not supported. -// TODO(dalecurtis): Add channel remixing. http://crbug.com/138762 class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher { public: AudioOutputResampler(AudioManager* audio_manager, @@ -63,21 +56,17 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher { friend class base::RefCountedThreadSafe<AudioOutputResampler>; virtual ~AudioOutputResampler(); - // Used to initialize the FIFO and resamplers. + // Used to initialize and reinitialize |dispatcher_|. void Initialize(); // Dispatcher to proxy all AudioOutputDispatcher calls too. scoped_refptr<AudioOutputDispatcher> dispatcher_; - // Map of outstanding OnMoreDataResampler objects. A new object is created + // Map of outstanding OnMoreDataConverter objects. A new object is created // on every StartStream() call and destroyed on CloseStream(). - typedef std::map<AudioOutputProxy*, OnMoreDataResampler*> CallbackMap; + typedef std::map<AudioOutputProxy*, OnMoreDataConverter*> CallbackMap; CallbackMap callbacks_; - // Ratio of input bytes to output bytes used to correct playback delay with - // regard to buffering and resampling. - double io_ratio_; - // Used by AudioOutputDispatcherImpl; kept so we can reinitialize on the fly. base::TimeDelta close_delay_; diff --git a/media/base/audio_converter.cc b/media/base/audio_converter.cc new file mode 100644 index 0000000..1b66b03 --- /dev/null +++ b/media/base/audio_converter.cc @@ -0,0 +1,213 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
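Note: the io_ratio_ and pending_bytes bookkeeping in OnMoreDataConverter above is easier to follow with concrete numbers. The parameters below are assumptions chosen for the example (48 kHz 16-bit stereo input, 44.1 kHz 16-bit stereo output), not values taken from the patch:

  // Worked example of the delay math in OnMoreDataConverter.
  const int input_bytes_per_second = 48000 * 2 * (16 / 8);   // 192000
  const int output_bytes_per_second = 44100 * 2 * (16 / 8);  // 176400
  const double io_ratio =
      static_cast<double>(input_bytes_per_second) / output_bytes_per_second;
  // io_ratio ~= 1.088.

  // Suppose the device reports 7056 output bytes (40 ms) still queued and the
  // converter reports a 10 ms |buffer_delay| (1920 input-side bytes):
  const double pending_bytes =
      io_ratio * (7056 + 0.010 * input_bytes_per_second);
  // pending_bytes ~= 1.088 * (7056 + 1920) ~= 9770, which is the value
  // ProvideInput() hands upstream via AudioBuffersState.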
+ +#include "media/base/audio_converter.h" + +#include <algorithm> + +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "media/base/audio_pull_fifo.h" +#include "media/base/channel_mixer.h" +#include "media/base/multi_channel_resampler.h" +#include "media/base/vector_math.h" + +namespace media { + +AudioConverter::AudioConverter(const AudioParameters& input_params, + const AudioParameters& output_params, + bool disable_fifo) + : downmix_early_(false), + resampler_frame_delay_(0), + input_channel_count_(input_params.channels()) { + CHECK(input_params.IsValid()); + CHECK(output_params.IsValid()); + + // Handle different input and output channel layouts. + if (input_params.channel_layout() != output_params.channel_layout()) { + DVLOG(1) << "Remixing channel layout from " << input_params.channel_layout() + << " to " << output_params.channel_layout() << "; from " + << input_params.channels() << " channels to " + << output_params.channels() << " channels."; + channel_mixer_.reset(new ChannelMixer( + input_params.channel_layout(), output_params.channel_layout())); + + // Pare off data as early as we can for efficiency. + downmix_early_ = input_params.channels() > output_params.channels(); + if (downmix_early_) { + DVLOG(1) << "Remixing channel layout prior to resampling."; + // |unmixed_audio_| will be allocated on the fly. + } else { + // Instead, if we're not downmixing early we need a temporary AudioBus + // which matches the input channel count but uses the output frame size + // since we'll mix into the AudioBus from the output stream. + unmixed_audio_ = AudioBus::Create( + input_params.channels(), output_params.frames_per_buffer()); + } + } + + // Only resample if necessary since it's expensive. + if (input_params.sample_rate() != output_params.sample_rate()) { + DVLOG(1) << "Resampling from " << input_params.sample_rate() << " to " + << output_params.sample_rate(); + double io_sample_rate_ratio = input_params.sample_rate() / + static_cast<double>(output_params.sample_rate()); + resampler_.reset(new MultiChannelResampler( + downmix_early_ ? output_params.channels() : + input_params.channels(), + io_sample_rate_ratio, base::Bind( + &AudioConverter::ProvideInput, base::Unretained(this)))); + } + + input_frame_duration_ = base::TimeDelta::FromMicroseconds( + base::Time::kMicrosecondsPerSecond / + static_cast<double>(input_params.sample_rate())); + output_frame_duration_ = base::TimeDelta::FromMicroseconds( + base::Time::kMicrosecondsPerSecond / + static_cast<double>(output_params.sample_rate())); + + if (disable_fifo) + return; + + // Since the resampler / output device may want a different buffer size than + // the caller asked for, we need to use a FIFO to ensure that both sides + // read in chunk sizes they're configured for. + if (resampler_.get() || + input_params.frames_per_buffer() != output_params.frames_per_buffer()) { + DVLOG(1) << "Rebuffering from " << input_params.frames_per_buffer() + << " to " << output_params.frames_per_buffer(); + audio_fifo_.reset(new AudioPullFifo( + downmix_early_ ? 
output_params.channels() : + input_params.channels(), + input_params.frames_per_buffer(), base::Bind( + &AudioConverter::SourceCallback, + base::Unretained(this)))); + } +} + +AudioConverter::~AudioConverter() {} + +void AudioConverter::AddInput(InputCallback* input) { + transform_inputs_.push_back(input); +} + +void AudioConverter::RemoveInput(InputCallback* input) { + DCHECK(std::find(transform_inputs_.begin(), transform_inputs_.end(), input) != + transform_inputs_.end()); + transform_inputs_.remove(input); + + if (transform_inputs_.empty()) + Reset(); +} + +void AudioConverter::Reset() { + if (audio_fifo_) + audio_fifo_->Clear(); + if (resampler_) + resampler_->Flush(); +} + +void AudioConverter::Convert(AudioBus* dest) { + if (transform_inputs_.empty()) { + dest->Zero(); + return; + } + + bool needs_mixing = channel_mixer_ && !downmix_early_; + AudioBus* temp_dest = needs_mixing ? unmixed_audio_.get() : dest; + DCHECK(temp_dest); + + if (!resampler_ && !audio_fifo_) { + SourceCallback(0, temp_dest); + } else { + if (resampler_) + resampler_->Resample(temp_dest, temp_dest->frames()); + else + ProvideInput(0, temp_dest); + } + + if (needs_mixing) { + DCHECK_EQ(temp_dest->frames(), dest->frames()); + channel_mixer_->Transform(temp_dest, dest); + } +} + +void AudioConverter::SourceCallback(int fifo_frame_delay, AudioBus* dest) { + bool needs_downmix = channel_mixer_ && downmix_early_; + + if (!mixer_input_audio_bus_ || + mixer_input_audio_bus_->frames() != dest->frames()) { + mixer_input_audio_bus_ = + AudioBus::Create(input_channel_count_, dest->frames()); + } + + if (needs_downmix && + (!unmixed_audio_ || unmixed_audio_->frames() != dest->frames())) { + // If we're downmixing early we need a temporary AudioBus which matches + // the the input channel count and input frame size since we're passing + // |unmixed_audio_| directly to the |source_callback_|. + unmixed_audio_ = AudioBus::Create(input_channel_count_, dest->frames()); + } + + AudioBus* temp_dest = needs_downmix ? unmixed_audio_.get() : dest; + + // Sanity check our inputs. + DCHECK_EQ(temp_dest->frames(), mixer_input_audio_bus_->frames()); + DCHECK_EQ(temp_dest->channels(), mixer_input_audio_bus_->channels()); + + // Calculate the buffer delay for this callback. + base::TimeDelta buffer_delay; + if (resampler_) { + buffer_delay += base::TimeDelta::FromMicroseconds( + resampler_frame_delay_ * output_frame_duration_.InMicroseconds()); + } + if (audio_fifo_) { + buffer_delay += base::TimeDelta::FromMicroseconds( + fifo_frame_delay * input_frame_duration_.InMicroseconds()); + } + + // Have each mixer render its data into an output buffer then mix the result. + for (InputCallbackSet::iterator it = transform_inputs_.begin(); + it != transform_inputs_.end(); ++it) { + InputCallback* input = *it; + + float volume = input->ProvideInput( + mixer_input_audio_bus_.get(), buffer_delay); + + // Optimize the most common single input, full volume case. + if (it == transform_inputs_.begin()) { + if (volume == 1.0f) { + mixer_input_audio_bus_->CopyTo(temp_dest); + continue; + } + + // Zero |temp_dest| otherwise, so we're mixing into a clean buffer. + temp_dest->Zero(); + } + + // Volume adjust and mix each mixer input into |temp_dest| after rendering. 
+ if (volume > 0) { + for (int i = 0; i < mixer_input_audio_bus_->channels(); ++i) { + vector_math::FMAC( + mixer_input_audio_bus_->channel(i), volume, + mixer_input_audio_bus_->frames(), temp_dest->channel(i)); + } + } + } + + if (needs_downmix) { + DCHECK_EQ(temp_dest->frames(), dest->frames()); + channel_mixer_->Transform(temp_dest, dest); + } +} + +void AudioConverter::ProvideInput(int resampler_frame_delay, AudioBus* dest) { + resampler_frame_delay_ = resampler_frame_delay; + if (audio_fifo_) + audio_fifo_->Consume(dest, dest->frames()); + else + SourceCallback(0, dest); +} + +} // namespace media diff --git a/media/base/audio_converter.h b/media/base/audio_converter.h new file mode 100644 index 0000000..2987b78 --- /dev/null +++ b/media/base/audio_converter.h @@ -0,0 +1,108 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef MEDIA_BASE_AUDIO_CONVERTER_H_ +#define MEDIA_BASE_AUDIO_CONVERTER_H_ + +#include <list> + +#include "base/callback.h" +#include "base/time.h" +#include "media/audio/audio_parameters.h" +#include "media/base/media_export.h" + +namespace media { + +class AudioBus; +class AudioPullFifo; +class ChannelMixer; +class MultiChannelResampler; + +// AudioConverter is a complete mixing, resampling, buffering, and channel +// mixing solution for converting data from one set of AudioParameters to +// another. For efficiency pieces are only invoked when necessary; e.g. the +// resampler is only used if the input and output sample rates differ. Mixing +// and channel down mixing are done prior to resampling to maximize efficiency. +class MEDIA_EXPORT AudioConverter { + public: + class MEDIA_EXPORT InputCallback { + public: + // Method for providing more data into the converter. Expects |audio_bus| + // to be completely filled with data upon return; zero padded if not enough + // frames are available to satisfy the request. The return value is the + // volume level of the provided audio data. If a volume level of zero is + // returned no further processing will be done on the provided data, else + // the volume level will be used to scale the provided audio data. + virtual double ProvideInput(AudioBus* audio_bus, + base::TimeDelta buffer_delay) = 0; + + protected: + virtual ~InputCallback() {} + }; + + // Construct an AudioConverter for converting between the given input and + // output parameters. Specifying |disable_fifo| means all InputCallbacks are + // capable of handling arbitrary buffer size requests; i.e. one call might ask + // for 10 frames of data (indicated by the size of AudioBus provided) and the + // next might ask for 20. In synthetic testing, disabling the FIFO yields a + // ~20% speed up for common cases. + AudioConverter(const AudioParameters& input_params, + const AudioParameters& output_params, + bool disable_fifo); + ~AudioConverter(); + + // Converts audio from all inputs into the |dest|. |dest| must be sized for + // data matching the output AudioParameters provided during construction. + void Convert(AudioBus* dest); + + // Add or remove an input from the converter. + void AddInput(InputCallback* input); + void RemoveInput(InputCallback* input); + + // Flush all buffered data. Automatically called when all inputs are removed. + void Reset(); + + private: + // Called by MultiChannelResampler when more data is necessary. 
+ void ProvideInput(int resampler_frame_delay, AudioBus* audio_bus); + + // Called by AudioPullFifo when more data is necessary. + void SourceCallback(int fifo_frame_delay, AudioBus* audio_bus); + + // Set of inputs for Convert(). + typedef std::list<InputCallback*> InputCallbackSet; + InputCallbackSet transform_inputs_; + + // Used to buffer data between the client and the output device in cases where + // the client buffer size is not the same as the output device buffer size. + scoped_ptr<AudioPullFifo> audio_fifo_; + + // Handles resampling. + scoped_ptr<MultiChannelResampler> resampler_; + + // Handles channel transforms. |unmixed_audio_| is a temporary destination + // for audio data before it goes into the channel mixer. + scoped_ptr<ChannelMixer> channel_mixer_; + scoped_ptr<AudioBus> unmixed_audio_; + + // Temporary AudioBus destination for mixing inputs. + scoped_ptr<AudioBus> mixer_input_audio_bus_; + + // Since resampling is expensive, figure out if we should downmix channels + // before resampling. + bool downmix_early_; + + // Used to calculate buffer delay information for InputCallbacks. + base::TimeDelta input_frame_duration_; + base::TimeDelta output_frame_duration_; + int resampler_frame_delay_; + + const int input_channel_count_; + + DISALLOW_COPY_AND_ASSIGN(AudioConverter); +}; + +} // namespace media + +#endif // MEDIA_BASE_AUDIO_CONVERTER_H_ diff --git a/media/base/audio_converter_unittest.cc b/media/base/audio_converter_unittest.cc new file mode 100644 index 0000000..0861222 --- /dev/null +++ b/media/base/audio_converter_unittest.cc @@ -0,0 +1,286 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// MSVC++ requires this to be set before any other includes to get M_PI. +#define _USE_MATH_DEFINES + +#include <cmath> + +#include "base/command_line.h" +#include "base/logging.h" +#include "base/memory/scoped_ptr.h" +#include "base/memory/scoped_vector.h" +#include "base/string_number_conversions.h" +#include "base/time.h" +#include "media/base/audio_converter.h" +#include "media/base/fake_audio_render_callback.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace media { + +// Command line switch for runtime adjustment of benchmark iterations. +static const char kBenchmarkIterations[] = "audio-converter-iterations"; +static const int kDefaultIterations = 10; + +// Parameters which control the many input case tests. +static const int kConvertInputs = 8; +static const int kConvertCycles = 3; + +// Parameters used for testing. +static const int kBitsPerChannel = 32; +static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO; +static const int kHighLatencyBufferSize = 2048; +static const int kLowLatencyBufferSize = 256; +static const int kSampleRate = 48000; + +// Number of full sine wave cycles for each Render() call. +static const int kSineCycles = 4; + +// Tuple of <input sampling rate, output sampling rate, epsilon>. +typedef std::tr1::tuple<int, int, double> AudioConverterTestData; +class AudioConverterTest + : public testing::TestWithParam<AudioConverterTestData> { + public: + AudioConverterTest() + : epsilon_(std::tr1::get<2>(GetParam())) { + // Create input and output parameters based on test parameters. 
+ input_parameters_ = AudioParameters( + AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, + std::tr1::get<0>(GetParam()), kBitsPerChannel, kHighLatencyBufferSize); + output_parameters_ = AudioParameters( + AudioParameters::AUDIO_PCM_LOW_LATENCY, kChannelLayout, + std::tr1::get<1>(GetParam()), 16, kLowLatencyBufferSize); + + converter_.reset(new AudioConverter( + input_parameters_, output_parameters_, false)); + + audio_bus_ = AudioBus::Create(output_parameters_); + expected_audio_bus_ = AudioBus::Create(output_parameters_); + + // Allocate one callback for generating expected results. + double step = kSineCycles / static_cast<double>( + output_parameters_.frames_per_buffer()); + expected_callback_.reset(new FakeAudioRenderCallback(step)); + } + + void InitializeInputs(int count) { + // Setup FakeAudioRenderCallback step to compensate for resampling. + double scale_factor = input_parameters_.sample_rate() / + static_cast<double>(output_parameters_.sample_rate()); + double step = kSineCycles / (scale_factor * + static_cast<double>(output_parameters_.frames_per_buffer())); + + for (int i = 0; i < count; ++i) { + fake_callbacks_.push_back(new FakeAudioRenderCallback(step)); + converter_->AddInput(fake_callbacks_[i]); + } + } + + void Reset() { + converter_->Reset(); + for (size_t i = 0; i < fake_callbacks_.size(); ++i) + fake_callbacks_[i]->reset(); + expected_callback_->reset(); + } + + void SetVolume(float volume) { + for (size_t i = 0; i < fake_callbacks_.size(); ++i) + fake_callbacks_[i]->set_volume(volume); + } + + bool ValidateAudioData(int index, int frames, float scale) { + for (int i = 0; i < audio_bus_->channels(); ++i) { + for (int j = index; j < frames; j++) { + double error = fabs(audio_bus_->channel(i)[j] - + expected_audio_bus_->channel(i)[j] * scale); + if (error > epsilon_) { + EXPECT_NEAR(expected_audio_bus_->channel(i)[j] * scale, + audio_bus_->channel(i)[j], epsilon_) + << " i=" << i << ", j=" << j; + return false; + } + } + } + return true; + } + + bool RenderAndValidateAudioData(float scale) { + // Render actual audio data. + converter_->Convert(audio_bus_.get()); + + // Render expected audio data. + expected_callback_->Render(expected_audio_bus_.get(), 0); + + return ValidateAudioData(0, audio_bus_->frames(), scale); + } + + // Fill |audio_bus_| fully with |value|. + void FillAudioData(float value) { + for (int i = 0; i < audio_bus_->channels(); ++i) { + std::fill(audio_bus_->channel(i), + audio_bus_->channel(i) + audio_bus_->frames(), value); + } + } + + // Verify output with a number of transform inputs. + void RunTest(int inputs) { + InitializeInputs(inputs); + + SetVolume(0); + for (int i = 0; i < kConvertCycles; ++i) + ASSERT_TRUE(RenderAndValidateAudioData(0)); + + Reset(); + + // Set a different volume for each input and verify the results. + float total_scale = 0; + for (size_t i = 0; i < fake_callbacks_.size(); ++i) { + float volume = static_cast<float>(i) / fake_callbacks_.size(); + total_scale += volume; + fake_callbacks_[i]->set_volume(volume); + } + for (int i = 0; i < kConvertCycles; ++i) + ASSERT_TRUE(RenderAndValidateAudioData(total_scale)); + + Reset(); + + // Remove every other input. + for (size_t i = 1; i < fake_callbacks_.size(); i += 2) + converter_->RemoveInput(fake_callbacks_[i]); + + SetVolume(1); + float scale = inputs > 1 ? 
inputs / 2.0f : inputs; + for (int i = 0; i < kConvertCycles; ++i) + ASSERT_TRUE(RenderAndValidateAudioData(scale)); + } + + protected: + virtual ~AudioConverterTest() {} + + scoped_ptr<AudioConverter> converter_; + AudioParameters input_parameters_; + AudioParameters output_parameters_; + scoped_ptr<AudioBus> audio_bus_; + scoped_ptr<AudioBus> expected_audio_bus_; + ScopedVector<FakeAudioRenderCallback> fake_callbacks_; + scoped_ptr<FakeAudioRenderCallback> expected_callback_; + double epsilon_; + + DISALLOW_COPY_AND_ASSIGN(AudioConverterTest); +}; + +// Ensure the buffer delay provided by AudioConverter is accurate. +TEST(AudioConverterTest, AudioDelay) { + // Choose input and output parameters such that the transform must make + // multiple calls to fill the buffer. + AudioParameters input_parameters = AudioParameters( + AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate, + kBitsPerChannel, kLowLatencyBufferSize); + AudioParameters output_parameters = AudioParameters( + AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate * 2, + kBitsPerChannel, kHighLatencyBufferSize); + + AudioConverter converter(input_parameters, output_parameters, false); + FakeAudioRenderCallback callback(0.2); + scoped_ptr<AudioBus> audio_bus = AudioBus::Create(output_parameters); + converter.AddInput(&callback); + converter.Convert(audio_bus.get()); + + // Calculate the expected buffer delay for given AudioParameters. + double input_sample_rate = input_parameters.sample_rate(); + int fill_count = + (output_parameters.frames_per_buffer() * input_sample_rate / + output_parameters.sample_rate()) / input_parameters.frames_per_buffer(); + + base::TimeDelta input_frame_duration = base::TimeDelta::FromMicroseconds( + base::Time::kMicrosecondsPerSecond / input_sample_rate); + + int expected_last_delay_milliseconds = + fill_count * input_parameters.frames_per_buffer() * + input_frame_duration.InMillisecondsF(); + + EXPECT_EQ(expected_last_delay_milliseconds, + callback.last_audio_delay_milliseconds()); +} + +// Benchmark for audio conversion. Original benchmarks were run with +// --audio-converter-iterations=50000. +TEST(AudioConverterTest, ConvertBenchmark) { + int benchmark_iterations = kDefaultIterations; + std::string iterations(CommandLine::ForCurrentProcess()->GetSwitchValueASCII( + kBenchmarkIterations)); + base::StringToInt(iterations, &benchmark_iterations); + if (benchmark_iterations < kDefaultIterations) + benchmark_iterations = kDefaultIterations; + + // Create input and output parameters to convert between the two most common + // sets of parameters (as indicated via UMA data). + AudioParameters input_params( + AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO, 48000, 16, 2048); + AudioParameters output_params( + AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO, 44100, 16, 440); + scoped_ptr<AudioConverter> converter( + new AudioConverter(input_params, output_params, false)); + + scoped_ptr<AudioBus> output_bus = AudioBus::Create(output_params); + FakeAudioRenderCallback fake_input1(0.2); + FakeAudioRenderCallback fake_input2(0.4); + FakeAudioRenderCallback fake_input3(0.6); + converter->AddInput(&fake_input1); + converter->AddInput(&fake_input2); + converter->AddInput(&fake_input3); + + printf("Benchmarking %d iterations:\n", benchmark_iterations); + + // Benchmark Convert() w/ FIFO. 
+ base::TimeTicks start = base::TimeTicks::HighResNow(); + for (int i = 0; i < benchmark_iterations; ++i) { + converter->Convert(output_bus.get()); + } + double total_time_ms = + (base::TimeTicks::HighResNow() - start).InMillisecondsF(); + printf("Convert() w/ FIFO took %.2fms.\n", total_time_ms); + + converter.reset(new AudioConverter(input_params, output_params, true)); + converter->AddInput(&fake_input1); + converter->AddInput(&fake_input2); + converter->AddInput(&fake_input3); + + // Benchmark Convert() w/o FIFO. + start = base::TimeTicks::HighResNow(); + for (int i = 0; i < benchmark_iterations; ++i) { + converter->Convert(output_bus.get()); + } + total_time_ms = + (base::TimeTicks::HighResNow() - start).InMillisecondsF(); + printf("Convert() w/o FIFO took %.2fms.\n", total_time_ms); +} + +TEST_P(AudioConverterTest, NoInputs) { + FillAudioData(1.0f); + EXPECT_TRUE(RenderAndValidateAudioData(0.0f)); +} + +TEST_P(AudioConverterTest, OneInput) { + RunTest(1); +} + +TEST_P(AudioConverterTest, ManyInputs) { + RunTest(kConvertInputs); +} + +INSTANTIATE_TEST_CASE_P( + // TODO(dalecurtis): Add test cases for channel transforms. + AudioConverterTest, AudioConverterTest, testing::Values( + // No resampling. + std::tr1::make_tuple(44100, 44100, 0.00000048), + + // Upsampling. + std::tr1::make_tuple(44100, 48000, 0.033), + + // Downsampling. + std::tr1::make_tuple(48000, 41000, 0.042))); + +} // namespace media diff --git a/media/base/audio_pull_fifo.cc b/media/base/audio_pull_fifo.cc index b1622e0..4943591 100644 --- a/media/base/audio_pull_fifo.cc +++ b/media/base/audio_pull_fifo.cc @@ -33,7 +33,7 @@ void AudioPullFifo::Consume(AudioBus* destination, int frames_to_consume) { // Get the remaining audio frames from the producer using the callback. while (remaining_frames_to_provide > 0) { // Fill up the FIFO by acquiring audio data from the producer. - read_cb_.Run(bus_.get()); + read_cb_.Run(write_pos, bus_.get()); fifo_->Push(bus_.get()); // Try to fulfill the request using what's available in the FIFO. diff --git a/media/base/audio_pull_fifo.h b/media/base/audio_pull_fifo.h index 9fd3a8e..caf73e4 100644 --- a/media/base/audio_pull_fifo.h +++ b/media/base/audio_pull_fifo.h @@ -20,8 +20,9 @@ class MEDIA_EXPORT AudioPullFifo { public: // Callback type for providing more data into the FIFO. Expects AudioBus // to be completely filled with data upon return; zero padded if not enough - // frames are available to satisfy the request. - typedef base::Callback<void(AudioBus* audio_bus)> ReadCB; + // frames are available to satisfy the request. |frame_delay| is the number + // of output frames already processed and can be used to estimate delay. + typedef base::Callback<void(int frame_delay, AudioBus* audio_bus)> ReadCB; // Constructs an AudioPullFifo with the specified |read_cb|, which is used to // read audio data to the FIFO if data is not already available. 
The internal diff --git a/media/base/audio_pull_fifo_unittest.cc b/media/base/audio_pull_fifo_unittest.cc index e5c005d..cec4d35 100644 --- a/media/base/audio_pull_fifo_unittest.cc +++ b/media/base/audio_pull_fifo_unittest.cc @@ -29,7 +29,8 @@ class AudioPullFifoTest : pull_fifo_(kChannels, kMaxFramesInFifo, base::Bind( &AudioPullFifoTest::ProvideInput, base::Unretained(this))), audio_bus_(AudioBus::Create(kChannels, kMaxFramesInFifo)), - fill_value_(0) {} + fill_value_(0), + last_frame_delay_(-1) {} virtual ~AudioPullFifoTest() {} void VerifyValue(const float data[], int size, float start_value) { @@ -51,12 +52,16 @@ class AudioPullFifoTest VerifyValue(audio_bus_->channel(j), frames_to_consume, start_value); } start_value += frames_to_consume; + EXPECT_LT(last_frame_delay_, audio_bus_->frames()); } // AudioPullFifo::ReadCB implementation where we increase a value for each // audio frame that we provide. Note that all channels are given the same // value to simplify the verification. - virtual void ProvideInput(AudioBus* audio_bus) { + virtual void ProvideInput(int frame_delay, AudioBus* audio_bus) { + ASSERT_GT(frame_delay, last_frame_delay_); + last_frame_delay_ = frame_delay; + EXPECT_EQ(audio_bus->channels(), audio_bus_->channels()); EXPECT_EQ(audio_bus->frames(), kMaxFramesInFifo); for (int i = 0; i < audio_bus->frames(); ++i) { @@ -72,6 +77,7 @@ class AudioPullFifoTest AudioPullFifo pull_fifo_; scoped_ptr<AudioBus> audio_bus_; int fill_value_; + int last_frame_delay_; DISALLOW_COPY_AND_ASSIGN(AudioPullFifoTest); }; diff --git a/media/base/audio_renderer_mixer.cc b/media/base/audio_renderer_mixer.cc index 4df2eea..003f6dd 100644 --- a/media/base/audio_renderer_mixer.cc +++ b/media/base/audio_renderer_mixer.cc @@ -7,9 +7,6 @@ #include "base/bind.h" #include "base/bind_helpers.h" #include "base/logging.h" -#include "media/audio/audio_util.h" -#include "media/base/limits.h" -#include "media/base/vector_math.h" namespace media { @@ -17,26 +14,7 @@ AudioRendererMixer::AudioRendererMixer( const AudioParameters& input_params, const AudioParameters& output_params, const scoped_refptr<AudioRendererSink>& sink) : audio_sink_(sink), - current_audio_delay_milliseconds_(0), - io_ratio_(1), - input_ms_per_frame_( - static_cast<double>(base::Time::kMillisecondsPerSecond) / - input_params.sample_rate()) { - DCHECK(input_params.IsValid()); - DCHECK(output_params.IsValid()); - - // Channel mixing is handled by the browser side currently. - DCHECK_EQ(input_params.channels(), output_params.channels()); - - // Only resample if necessary since it's expensive. 
- if (input_params.sample_rate() != output_params.sample_rate()) { - io_ratio_ = input_params.sample_rate() / - static_cast<double>(output_params.sample_rate()); - resampler_.reset(new MultiChannelResampler( - output_params.channels(), io_ratio_, - base::Bind(&AudioRendererMixer::ProvideInput, base::Unretained(this)))); - } - + audio_converter_(input_params, output_params, true) { audio_sink_->Initialize(output_params, this); audio_sink_->Start(); } @@ -53,78 +31,29 @@ AudioRendererMixer::~AudioRendererMixer() { void AudioRendererMixer::AddMixerInput( const scoped_refptr<AudioRendererMixerInput>& input) { base::AutoLock auto_lock(mixer_inputs_lock_); - mixer_inputs_.insert(input); + mixer_inputs_.push_back(input); + audio_converter_.AddInput(input); } void AudioRendererMixer::RemoveMixerInput( const scoped_refptr<AudioRendererMixerInput>& input) { base::AutoLock auto_lock(mixer_inputs_lock_); - mixer_inputs_.erase(input); + audio_converter_.RemoveInput(input); + mixer_inputs_.remove(input); } int AudioRendererMixer::Render(AudioBus* audio_bus, int audio_delay_milliseconds) { - current_audio_delay_milliseconds_ = audio_delay_milliseconds / io_ratio_; - - if (resampler_.get()) - resampler_->Resample(audio_bus, audio_bus->frames()); - else - ProvideInput(audio_bus); - - // Always return the full number of frames requested, ProvideInput() will pad - // with silence if it wasn't able to acquire enough data. - return audio_bus->frames(); -} - -void AudioRendererMixer::ProvideInput(AudioBus* audio_bus) { base::AutoLock auto_lock(mixer_inputs_lock_); - // Allocate staging area for each mixer input's audio data on first call. We - // won't know how much to allocate until here because of resampling. Ensure - // our intermediate AudioBus is sized exactly as the original. Resize should - // only happen once due to the way the resampler works. - if (!mixer_input_audio_bus_.get() || - mixer_input_audio_bus_->frames() != audio_bus->frames()) { - mixer_input_audio_bus_ = - AudioBus::Create(audio_bus->channels(), audio_bus->frames()); - } - - // Sanity check our inputs. - DCHECK_EQ(audio_bus->frames(), mixer_input_audio_bus_->frames()); - DCHECK_EQ(audio_bus->channels(), mixer_input_audio_bus_->channels()); - - // Zero |audio_bus| so we're mixing into a clean buffer and return silence if - // we couldn't get enough data from our inputs. - audio_bus->Zero(); - - // Have each mixer render its data into an output buffer then mix the result. + // Set the delay information for each mixer input. for (AudioRendererMixerInputSet::iterator it = mixer_inputs_.begin(); it != mixer_inputs_.end(); ++it) { - const scoped_refptr<AudioRendererMixerInput>& input = *it; - - double volume; - input->GetVolume(&volume); - - // Nothing to do if the input isn't playing. - if (!input->playing()) - continue; - - int frames_filled = input->callback()->Render( - mixer_input_audio_bus_.get(), current_audio_delay_milliseconds_); - if (frames_filled == 0) - continue; - - // Volume adjust and mix each mixer input into |audio_bus| after rendering. - for (int i = 0; i < audio_bus->channels(); ++i) { - vector_math::FMAC( - mixer_input_audio_bus_->channel(i), volume, frames_filled, - audio_bus->channel(i)); - } + (*it)->set_audio_delay_milliseconds(audio_delay_milliseconds); } - // Update the delay estimate. 
- current_audio_delay_milliseconds_ += - audio_bus->frames() * input_ms_per_frame_; + audio_converter_.Convert(audio_bus); + return audio_bus->frames(); } void AudioRendererMixer::OnRenderError() { @@ -133,7 +62,7 @@ void AudioRendererMixer::OnRenderError() { // Call each mixer input and signal an error. for (AudioRendererMixerInputSet::iterator it = mixer_inputs_.begin(); it != mixer_inputs_.end(); ++it) { - (*it)->callback()->OnRenderError(); + (*it)->OnRenderError(); } } diff --git a/media/base/audio_renderer_mixer.h b/media/base/audio_renderer_mixer.h index 7bb85af..bac048d 100644 --- a/media/base/audio_renderer_mixer.h +++ b/media/base/audio_renderer_mixer.h @@ -5,20 +5,18 @@ #ifndef MEDIA_BASE_AUDIO_RENDERER_MIXER_H_ #define MEDIA_BASE_AUDIO_RENDERER_MIXER_H_ -#include <set> +#include <list> #include "base/synchronization/lock.h" +#include "media/base/audio_converter.h" #include "media/base/audio_renderer_mixer_input.h" #include "media/base/audio_renderer_sink.h" -#include "media/base/multi_channel_resampler.h" namespace media { // Mixes a set of AudioRendererMixerInputs into a single output stream which is // funneled into a single shared AudioRendererSink; saving a bundle on renderer -// side resources. Resampling is done post-mixing as it is the most expensive -// process. If the input sample rate matches the audio hardware sample rate, no -// resampling is done. +// side resources. class MEDIA_EXPORT AudioRendererMixer : NON_EXPORTED_BASE(public AudioRendererSink::RenderCallback) { public: @@ -37,33 +35,18 @@ class MEDIA_EXPORT AudioRendererMixer int audio_delay_milliseconds) OVERRIDE; virtual void OnRenderError() OVERRIDE; - // Handles mixing and volume adjustment. Fully fills |audio_bus| with mixed - // audio data. When resampling is necessary, ProvideInput() will be called - // by MultiChannelResampler when more data is necessary. - void ProvideInput(AudioBus* audio_bus); - // Output sink for this mixer. scoped_refptr<AudioRendererSink> audio_sink_; // Set of mixer inputs to be mixed by this mixer. Access is thread-safe // through |mixer_inputs_lock_|. - typedef std::set< scoped_refptr<AudioRendererMixerInput> > + typedef std::list<scoped_refptr<AudioRendererMixerInput> > AudioRendererMixerInputSet; AudioRendererMixerInputSet mixer_inputs_; base::Lock mixer_inputs_lock_; - // Vector for rendering audio data from each mixer input. - scoped_ptr<AudioBus> mixer_input_audio_bus_; - - // Handles resampling post-mixing. - scoped_ptr<MultiChannelResampler> resampler_; - - // The audio delay in milliseconds received by the last Render() call. - int current_audio_delay_milliseconds_; - - // Ratio of input data to output data. Used to scale audio delay information. - double io_ratio_; - double input_ms_per_frame_; + // Handles mixing and resampling between input and output parameters. 
+ AudioConverter audio_converter_; DISALLOW_COPY_AND_ASSIGN(AudioRendererMixer); }; diff --git a/media/base/audio_renderer_mixer_input.cc b/media/base/audio_renderer_mixer_input.cc index c748f8f..763472a 100644 --- a/media/base/audio_renderer_mixer_input.cc +++ b/media/base/audio_renderer_mixer_input.cc @@ -18,7 +18,8 @@ AudioRendererMixerInput::AudioRendererMixerInput( get_mixer_cb_(get_mixer_cb), remove_mixer_cb_(remove_mixer_cb), mixer_(NULL), - callback_(NULL) { + callback_(NULL), + current_audio_delay_milliseconds_(0) { } AudioRendererMixerInput::~AudioRendererMixerInput() { @@ -72,8 +73,27 @@ bool AudioRendererMixerInput::SetVolume(double volume) { return true; } -void AudioRendererMixerInput::GetVolume(double* volume) { - *volume = volume_; +double AudioRendererMixerInput::ProvideInput(AudioBus* audio_bus, + base::TimeDelta buffer_delay) { + int frames_filled = 0; + + if (playing_) { + frames_filled = callback_->Render( + audio_bus, + current_audio_delay_milliseconds_ + buffer_delay.InMilliseconds()); + + // AudioConverter expects unfilled frames to be zeroed. + if (frames_filled < audio_bus->frames()) { + audio_bus->ZeroFramesPartial( + frames_filled, audio_bus->frames() - frames_filled); + } + } + + return frames_filled > 0 ? volume_ : 0; +} + +void AudioRendererMixerInput::OnRenderError() { + callback_->OnRenderError(); } } // namespace media diff --git a/media/base/audio_renderer_mixer_input.h b/media/base/audio_renderer_mixer_input.h index 486f5c2..023badd 100644 --- a/media/base/audio_renderer_mixer_input.h +++ b/media/base/audio_renderer_mixer_input.h @@ -8,6 +8,7 @@ #include <vector> #include "base/callback.h" +#include "media/base/audio_converter.h" #include "media/base/audio_renderer_sink.h" namespace media { @@ -15,7 +16,8 @@ namespace media { class AudioRendererMixer; class MEDIA_EXPORT AudioRendererMixerInput - : NON_EXPORTED_BASE(public AudioRendererSink) { + : NON_EXPORTED_BASE(public AudioRendererSink), + public AudioConverter::InputCallback { public: typedef base::Callback<AudioRendererMixer*( const AudioParameters& params)> GetMixerCB; @@ -24,9 +26,6 @@ class MEDIA_EXPORT AudioRendererMixerInput AudioRendererMixerInput( const GetMixerCB& get_mixer_cb, const RemoveMixerCB& remove_mixer_cb); - AudioRendererSink::RenderCallback* callback() { return callback_; } - bool playing() { return playing_; } - // AudioRendererSink implementation. virtual void Start() OVERRIDE; virtual void Stop() OVERRIDE; @@ -36,17 +35,29 @@ class MEDIA_EXPORT AudioRendererMixerInput virtual void Initialize(const AudioParameters& params, AudioRendererSink::RenderCallback* renderer) OVERRIDE; - void GetVolume(double* volume); + // Called by AudioRendererMixer when new delay information is available. + void set_audio_delay_milliseconds(int audio_delay_milliseconds) { + current_audio_delay_milliseconds_ = audio_delay_milliseconds; + } + + // Called by AudioRendererMixer when an error occurs. + void OnRenderError(); protected: virtual ~AudioRendererMixerInput(); private: + friend class AudioRendererMixerInputTest; + bool playing_; bool initialized_; bool started_; double volume_; + // AudioConverter::InputCallback implementation. + virtual double ProvideInput(AudioBus* audio_bus, + base::TimeDelta buffer_delay) OVERRIDE; + // Callbacks provided during construction which allow AudioRendererMixerInput // to retrieve a mixer during Initialize() and notify when it's done with it. 
GetMixerCB get_mixer_cb_; @@ -62,6 +73,9 @@ class MEDIA_EXPORT AudioRendererMixerInput // Source of audio data which is provided to the mixer. AudioRendererSink::RenderCallback* callback_; + // The current audio delay as last provided by AudioRendererMixer. + int current_audio_delay_milliseconds_; + DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInput); }; diff --git a/media/base/audio_renderer_mixer_input_unittest.cc b/media/base/audio_renderer_mixer_input_unittest.cc index a3cb3bc..5b4aac2 100644 --- a/media/base/audio_renderer_mixer_input_unittest.cc +++ b/media/base/audio_renderer_mixer_input_unittest.cc @@ -29,6 +29,7 @@ class AudioRendererMixerInputTest : public testing::Test { fake_callback_.reset(new FakeAudioRenderCallback(0)); mixer_input_->Initialize(audio_parameters_, fake_callback_.get()); EXPECT_CALL(*this, RemoveMixer(testing::_)); + audio_bus_ = AudioBus::Create(audio_parameters_); } void CreateMixerInput() { @@ -51,6 +52,14 @@ class AudioRendererMixerInputTest : public testing::Test { return mixer_.get(); } + double ProvideInput() { + return mixer_input_->ProvideInput(audio_bus_.get(), base::TimeDelta()); + } + + int GetAudioDelayMilliseconds() { + return mixer_input_->current_audio_delay_milliseconds_; + } + MOCK_METHOD1(RemoveMixer, void(const AudioParameters&)); protected: @@ -60,26 +69,25 @@ class AudioRendererMixerInputTest : public testing::Test { scoped_ptr<AudioRendererMixer> mixer_; scoped_refptr<AudioRendererMixerInput> mixer_input_; scoped_ptr<FakeAudioRenderCallback> fake_callback_; + scoped_ptr<AudioBus> audio_bus_; DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInputTest); }; -// Test callback() works as expected. -TEST_F(AudioRendererMixerInputTest, GetCallback) { - EXPECT_EQ(mixer_input_->callback(), fake_callback_.get()); -} - -// Test that getting and setting the volume work as expected. +// Test that getting and setting the volume work as expected. The volume is +// returned from ProvideInput() only when playing. TEST_F(AudioRendererMixerInputTest, GetSetVolume) { - // Starting volume should be 0. - double volume = 1.0f; - mixer_input_->GetVolume(&volume); - EXPECT_EQ(volume, 1.0f); + mixer_input_->Start(); + mixer_input_->Play(); + + // Starting volume should be 1.0. + EXPECT_DOUBLE_EQ(ProvideInput(), 1); - const double kVolume = 0.5f; + const double kVolume = 0.5; EXPECT_TRUE(mixer_input_->SetVolume(kVolume)); - mixer_input_->GetVolume(&volume); - EXPECT_EQ(volume, kVolume); + EXPECT_DOUBLE_EQ(ProvideInput(), kVolume); + + mixer_input_->Stop(); } // Test Start()/Play()/Pause()/Stop()/playing() all work as expected. Also @@ -87,15 +95,15 @@ TEST_F(AudioRendererMixerInputTest, GetSetVolume) { // crashing; functional tests for these methods are in AudioRendererMixerTest. TEST_F(AudioRendererMixerInputTest, StartPlayPauseStopPlaying) { mixer_input_->Start(); - EXPECT_FALSE(mixer_input_->playing()); + EXPECT_DOUBLE_EQ(ProvideInput(), 0); mixer_input_->Play(); - EXPECT_TRUE(mixer_input_->playing()); + EXPECT_DOUBLE_EQ(ProvideInput(), 1); mixer_input_->Pause(false); - EXPECT_FALSE(mixer_input_->playing()); + EXPECT_DOUBLE_EQ(ProvideInput(), 0); mixer_input_->Play(); - EXPECT_TRUE(mixer_input_->playing()); + EXPECT_DOUBLE_EQ(ProvideInput(), 1); mixer_input_->Stop(); - EXPECT_FALSE(mixer_input_->playing()); + EXPECT_DOUBLE_EQ(ProvideInput(), 0); } // Test that Stop() can be called before Initialize() and Start(). 
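Note: a rough illustration of how the delay seen by a renderer is composed after this change. AudioRendererMixer forwards the sink delay via set_audio_delay_milliseconds(), and ProvideInput() adds the converter's |buffer_delay| before calling Render(); the numbers below are assumptions for the example:

  // Assumed values: the sink reported 40 ms of queued audio; the converter's
  // FIFO holds 256 frames at 44.1 kHz when this input is pulled.
  const int sink_delay_ms = 40;  // Passed to Render(), forwarded by the mixer.
  const base::TimeDelta buffer_delay = base::TimeDelta::FromMicroseconds(
      256 * base::Time::kMicrosecondsPerSecond / 44100);  // ~5.8 ms.
  const int delay_seen_by_renderer_ms =
      sink_delay_ms + buffer_delay.InMilliseconds();  // ~45 ms.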
diff --git a/media/base/audio_renderer_mixer_unittest.cc b/media/base/audio_renderer_mixer_unittest.cc index aad2313..7315b85f 100644 --- a/media/base/audio_renderer_mixer_unittest.cc +++ b/media/base/audio_renderer_mixer_unittest.cc @@ -77,8 +77,8 @@ class AudioRendererMixerTest fake_callbacks_.reserve(count); // Setup FakeAudioRenderCallback step to compensate for resampling. - double scale_factor = input_parameters_.sample_rate() - / static_cast<double>(output_parameters_.sample_rate()); + double scale_factor = input_parameters_.sample_rate() / + static_cast<double>(output_parameters_.sample_rate()); double step = kSineCycles / (scale_factor * static_cast<double>(output_parameters_.frames_per_buffer())); @@ -95,14 +95,14 @@ class AudioRendererMixerTest EXPECT_CALL(*this, RemoveMixer(testing::_)).Times(count); } - bool ValidateAudioData(int index, int frames, float scale) { + bool ValidateAudioData(int index, int frames, float scale, double epsilon) { for (int i = 0; i < audio_bus_->channels(); ++i) { for (int j = index; j < frames; j++) { double error = fabs(audio_bus_->channel(i)[j] - expected_audio_bus_->channel(i)[j] * scale); - if (error > epsilon_) { + if (error > epsilon) { EXPECT_NEAR(expected_audio_bus_->channel(i)[j] * scale, - audio_bus_->channel(i)[j], epsilon_) + audio_bus_->channel(i)[j], epsilon) << " i=" << i << ", j=" << j; return false; } @@ -111,18 +111,15 @@ class AudioRendererMixerTest return true; } - bool RenderAndValidateAudioData(float scale) { - // Half fill won't be exactly half when resampling since the resampler - // will have enough data to fill out more of the buffer based on its - // internal buffer and kernel size. So special case some of the checks. - bool resampling = input_parameters_.sample_rate() - != output_parameters_.sample_rate(); + bool ValidateAudioData(int index, int frames, float scale) { + return ValidateAudioData(index, frames, scale, epsilon_); + } + bool RenderAndValidateAudioData(float scale) { if (half_fill_) { for (size_t i = 0; i < fake_callbacks_.size(); ++i) fake_callbacks_[i]->set_half_fill(true); expected_callback_->set_half_fill(true); - expected_audio_bus_->Zero(); } // Render actual audio data. @@ -134,13 +131,10 @@ class AudioRendererMixerTest expected_callback_->Render(expected_audio_bus_.get(), 0); if (half_fill_) { - // Verify first half of audio data for both resampling and non-resampling. - if (!ValidateAudioData(0, frames / 2, scale)) - return false; - // Verify silence in the second half if we're not resampling. - if (!resampling) - return ValidateAudioData(frames / 2, frames, 0); - return true; + // In this case, just verify that every frame was initialized, this will + // only fail under tooling such as valgrind. + return ValidateAudioData( + 0, frames, 0, std::numeric_limits<double>::max()); } else { return ValidateAudioData(0, frames, scale); } @@ -388,26 +382,6 @@ TEST_P(AudioRendererMixerTest, OnRenderError) { mixer_inputs_[i]->Stop(); } -// Verify that audio delay information is scaled to the input parameters. -TEST_P(AudioRendererMixerTest, DelayTest) { - InitializeInputs(1); - static const int kAudioDelayMilliseconds = 100; - ASSERT_EQ(mixer_inputs_.size(), 1u); - - // Start the input and issue a single render callback. - mixer_inputs_[0]->Start(); - mixer_inputs_[0]->Play(); - mixer_callback_->Render(audio_bus_.get(), kAudioDelayMilliseconds); - - // The input to output ratio should only include the sample rate difference. 
- double io_ratio = input_parameters_.sample_rate() / - static_cast<double>(output_parameters_.sample_rate()); - - EXPECT_EQ(static_cast<int>(kAudioDelayMilliseconds / io_ratio), - fake_callbacks_[0]->last_audio_delay_milliseconds()); - mixer_inputs_[0]->Stop(); -} - // Ensure constructing an AudioRendererMixerInput, but not initializing it does // not call RemoveMixer(). TEST_P(AudioRendererMixerTest, NoInitialize) { diff --git a/media/base/fake_audio_render_callback.cc b/media/base/fake_audio_render_callback.cc index 65b6ac9..af55910 100644 --- a/media/base/fake_audio_render_callback.cc +++ b/media/base/fake_audio_render_callback.cc @@ -5,16 +5,17 @@ // MSVC++ requires this to be set before any other includes to get M_PI. #define _USE_MATH_DEFINES -#include "media/base/fake_audio_render_callback.h" - #include <cmath> +#include "media/base/fake_audio_render_callback.h" + namespace media { FakeAudioRenderCallback::FakeAudioRenderCallback(double step) : half_fill_(false), step_(step), - last_audio_delay_milliseconds_(-1) { + last_audio_delay_milliseconds_(-1), + volume_(1) { reset(); } @@ -40,4 +41,10 @@ int FakeAudioRenderCallback::Render(AudioBus* audio_bus, return number_of_frames; } +double FakeAudioRenderCallback::ProvideInput(AudioBus* audio_bus, + base::TimeDelta buffer_delay) { + Render(audio_bus, buffer_delay.InMilliseconds()); + return volume_; +} + } // namespace media diff --git a/media/base/fake_audio_render_callback.h b/media/base/fake_audio_render_callback.h index 760e39d..5318c99 100644 --- a/media/base/fake_audio_render_callback.h +++ b/media/base/fake_audio_render_callback.h @@ -5,6 +5,7 @@ #ifndef MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_ #define MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_ +#include "media/base/audio_converter.h" #include "media/base/audio_renderer_sink.h" #include "testing/gmock/include/gmock/gmock.h" @@ -12,7 +13,10 @@ namespace media { // Fake RenderCallback which will fill each request with a sine wave. Sine // state is kept across callbacks. State can be reset to default via reset(). -class FakeAudioRenderCallback : public AudioRendererSink::RenderCallback { +// Also provide an interface to AudioTransformInput. +class FakeAudioRenderCallback + : public AudioRendererSink::RenderCallback, + public AudioConverter::InputCallback { public: // The function used to fulfill Render() is f(x) = sin(2 * PI * x * |step|), // where x = [|number_of_frames| * m, |number_of_frames| * (m + 1)] and m = @@ -22,9 +26,14 @@ class FakeAudioRenderCallback : public AudioRendererSink::RenderCallback { // Renders a sine wave into the provided audio data buffer. If |half_fill_| // is set, will only fill half the buffer. - int Render(AudioBus* audio_bus, int audio_delay_milliseconds) OVERRIDE; + virtual int Render(AudioBus* audio_bus, + int audio_delay_milliseconds) OVERRIDE; MOCK_METHOD0(OnRenderError, void()); + // AudioTransform::ProvideAudioTransformInput implementation. + virtual double ProvideInput(AudioBus* audio_bus, + base::TimeDelta buffer_delay) OVERRIDE; + // Toggles only filling half the requested amount during Render(). void set_half_fill(bool half_fill) { half_fill_ = half_fill; } @@ -35,11 +44,15 @@ class FakeAudioRenderCallback : public AudioRendererSink::RenderCallback { // no Render() call occurred. int last_audio_delay_milliseconds() { return last_audio_delay_milliseconds_; } + // Set volume information used by ProvideAudioTransformInput(). 
+ void set_volume(double volume) { volume_ = volume; } + private: bool half_fill_; double x_; double step_; int last_audio_delay_milliseconds_; + double volume_; DISALLOW_COPY_AND_ASSIGN(FakeAudioRenderCallback); }; diff --git a/media/base/multi_channel_resampler.cc b/media/base/multi_channel_resampler.cc index b8df97d..a5cbf3ef 100644 --- a/media/base/multi_channel_resampler.cc +++ b/media/base/multi_channel_resampler.cc @@ -15,7 +15,8 @@ MultiChannelResampler::MultiChannelResampler(int channels, double io_sample_rate_ratio, const ReadCB& read_cb) : last_frame_count_(0), - read_cb_(read_cb) { + read_cb_(read_cb), + output_frames_ready_(0) { // Allocate each channel's resampler. resamplers_.reserve(channels); for (int i = 0; i < channels; ++i) { @@ -33,10 +34,10 @@ void MultiChannelResampler::Resample(AudioBus* audio_bus, int frames) { // channel. To ensure this, we chunk the number of requested frames into // SincResampler::ChunkSize() sized chunks. SincResampler guarantees it will // only call ProvideInput() once when we resample this way. - int frames_done = 0; + output_frames_ready_ = 0; int chunk_size = resamplers_[0]->ChunkSize(); - while (frames_done < frames) { - int frames_this_time = std::min(frames - frames_done, chunk_size); + while (output_frames_ready_ < frames) { + int frames_this_time = std::min(frames - output_frames_ready_, chunk_size); // Resample each channel. for (size_t i = 0; i < resamplers_.size(); ++i) { @@ -49,10 +50,10 @@ void MultiChannelResampler::Resample(AudioBus* audio_bus, int frames) { // since they all buffer in the same way and are processing the same // number of frames. resamplers_[i]->Resample( - audio_bus->channel(i) + frames_done, frames_this_time); + audio_bus->channel(i) + output_frames_ready_, frames_this_time); } - frames_done += frames_this_time; + output_frames_ready_ += frames_this_time; } } @@ -82,7 +83,7 @@ void MultiChannelResampler::ProvideInput(int channel, float* destination, } last_frame_count_ = frames; - read_cb_.Run(wrapped_resampler_audio_bus_.get()); + read_cb_.Run(output_frames_ready_, wrapped_resampler_audio_bus_.get()); } else { // All channels must ask for the same amount. This should always be the // case, but let's just make sure. diff --git a/media/base/multi_channel_resampler.h b/media/base/multi_channel_resampler.h index 748bb47..6dd565b 100644 --- a/media/base/multi_channel_resampler.h +++ b/media/base/multi_channel_resampler.h @@ -21,8 +21,9 @@ class MEDIA_EXPORT MultiChannelResampler { public: // Callback type for providing more data into the resampler. Expects AudioBus // to be completely filled with data upon return; zero padded if not enough - // frames are available to satisfy the request. - typedef base::Callback<void(AudioBus* audio_bus)> ReadCB; + // frames are available to satisfy the request. |frame_delay| is the number + // of output frames already processed and can be used to estimate delay. + typedef base::Callback<void(int frame_delay, AudioBus* audio_bus)> ReadCB; // Constructs a MultiChannelResampler with the specified |read_cb|, which is // used to acquire audio data for resampling. |io_sample_rate_ratio| is the @@ -56,6 +57,12 @@ class MEDIA_EXPORT MultiChannelResampler { scoped_ptr<AudioBus> resampler_audio_bus_; scoped_ptr<AudioBus> wrapped_resampler_audio_bus_; std::vector<float*> resampler_audio_data_; + + // The number of output frames that have successfully been processed during + // the current Resample() call. 
+ int output_frames_ready_; + + DISALLOW_COPY_AND_ASSIGN(MultiChannelResampler); }; } // namespace media diff --git a/media/base/multi_channel_resampler_unittest.cc b/media/base/multi_channel_resampler_unittest.cc index 623c9ef..ad67550 100644 --- a/media/base/multi_channel_resampler_unittest.cc +++ b/media/base/multi_channel_resampler_unittest.cc @@ -37,7 +37,9 @@ static const double kHighLatencyMaxError = 0.04; class MultiChannelResamplerTest : public testing::TestWithParam<int> { public: - MultiChannelResamplerTest() {} + MultiChannelResamplerTest() + : last_frame_delay_(-1) { + } virtual ~MultiChannelResamplerTest() {} void InitializeAudioData(int channels, int frames) { @@ -47,7 +49,10 @@ class MultiChannelResamplerTest // MultiChannelResampler::MultiChannelAudioSourceProvider implementation, just // fills the provided audio_data with |kFillValue|. - virtual void ProvideInput(AudioBus* audio_bus) { + virtual void ProvideInput(int frame_delay, AudioBus* audio_bus) { + EXPECT_GT(frame_delay, last_frame_delay_); + last_frame_delay_ = frame_delay; + float fill_value = fill_junk_values_ ? (1 / kFillValue) : kFillValue; EXPECT_EQ(audio_bus->channels(), audio_bus_->channels()); for (int i = 0; i < audio_bus->channels(); ++i) @@ -58,15 +63,19 @@ class MultiChannelResamplerTest void MultiChannelTest(int channels, int frames, double expected_max_rms_error, double expected_max_error) { InitializeAudioData(channels, frames); - MultiChannelResampler resampler( - channels, kScaleFactor, base::Bind( - &MultiChannelResamplerTest::ProvideInput, - base::Unretained(this))); + MultiChannelResampler resampler(channels, kScaleFactor, base::Bind( + &MultiChannelResamplerTest::ProvideInput, base::Unretained(this))); + // First prime the resampler with some junk data, so we can verify Flush(). fill_junk_values_ = true; resampler.Resample(audio_bus_.get(), 1); resampler.Flush(); fill_junk_values_ = false; + + // The last frame delay should be strictly less than the total frame count. + EXPECT_LT(last_frame_delay_, audio_bus_->frames()); + last_frame_delay_ = -1; + // If Flush() didn't work, the rest of the tests will fail. 
    resampler.Resample(audio_bus_.get(), frames);
     TestValues(expected_max_rms_error, expected_max_error);
@@ -108,6 +117,7 @@ class MultiChannelResamplerTest
   int frames_;
   bool fill_junk_values_;
   scoped_ptr<AudioBus> audio_bus_;
+  int last_frame_delay_;
   DISALLOW_COPY_AND_ASSIGN(MultiChannelResamplerTest);
 };
diff --git a/media/media.gyp b/media/media.gyp
index eca9c7f..76e09b7 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -159,6 +159,8 @@
       'base/android/media_player_bridge_manager.cc',
       'base/android/media_player_bridge_manager.h',
       'base/audio_capturer_source.h',
+      'base/audio_converter.cc',
+      'base/audio_converter.h',
       'base/audio_decoder.cc',
       'base/audio_decoder.h',
       'base/audio_decoder_config.cc',
@@ -624,6 +626,7 @@
       'audio/win/audio_unified_win_unittest.cc',
       'audio/win/core_audio_util_win_unittest.cc',
       'base/audio_bus_unittest.cc',
+      'base/audio_converter_unittest.cc',
       'base/audio_fifo_unittest.cc',
       'base/audio_pull_fifo_unittest.cc',
       'base/audio_renderer_mixer_input_unittest.cc',
diff --git a/remoting/codec/audio_encoder_opus.cc b/remoting/codec/audio_encoder_opus.cc
index 6ff3056..ab19c94 100644
--- a/remoting/codec/audio_encoder_opus.cc
+++ b/remoting/codec/audio_encoder_opus.cc
@@ -124,7 +124,8 @@ bool AudioEncoderOpus::ResetForPacket(AudioPacket* packet) {
   return encoder_ != NULL;
 }
-void AudioEncoderOpus::FetchBytesToResample(media::AudioBus* audio_bus) {
+void AudioEncoderOpus::FetchBytesToResample(int resampler_frame_delay,
+                                            media::AudioBus* audio_bus) {
   DCHECK(resampling_data_);
   int samples_left = (resampling_data_size_ - resampling_data_pos_) /
       kBytesPerSample / channels_;
diff --git a/remoting/codec/audio_encoder_opus.h b/remoting/codec/audio_encoder_opus.h
index b172de2..7f610d4 100644
--- a/remoting/codec/audio_encoder_opus.h
+++ b/remoting/codec/audio_encoder_opus.h
@@ -34,7 +34,8 @@ class AudioEncoderOpus : public AudioEncoder {
   void DestroyEncoder();
   bool ResetForPacket(AudioPacket* packet);
-  void FetchBytesToResample(media::AudioBus* audio_bus);
+  void FetchBytesToResample(int resampler_frame_delay,
+                            media::AudioBus* audio_bus);
   int sampling_rate_;
   AudioPacket::Channels channels_;
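Note: the |frame_delay| / |resampler_frame_delay| argument added above reports how many output frames the current MultiChannelResampler::Resample() call has already produced, which a ReadCB can turn into a time-based delay estimate. The following is only a rough sketch of that pattern, assuming the callback knows the resampler's output sample rate; the class and member names are illustrative and are not part of this change.

#include "base/basictypes.h"
#include "base/bind.h"
#include "base/time.h"
#include "media/base/audio_bus.h"
#include "media/base/multi_channel_resampler.h"

namespace media {

// Illustrative source that records, on every read callback, how far into the
// destination buffer the resampler already is.
class DelayAwareSource {
 public:
  DelayAwareSource(int channels, double io_sample_rate_ratio, int output_rate)
      : output_rate_(output_rate),
        resampler_(channels, io_sample_rate_ratio,
                   base::Bind(&DelayAwareSource::ProvideInput,
                              base::Unretained(this))) {}

  // Pulls |dest->frames()| resampled frames from the underlying source.
  void Read(AudioBus* dest) { resampler_.Resample(dest, dest->frames()); }

  // Delay estimate observed during the most recent read.
  base::TimeDelta last_read_delay() const { return last_read_delay_; }

 private:
  // MultiChannelResampler::ReadCB implementation.
  void ProvideInput(int frame_delay, AudioBus* audio_bus) {
    // |frame_delay| output frames were already produced during this
    // Resample() call, so data written now will play back roughly that much
    // after the start of the destination buffer.
    last_read_delay_ = base::TimeDelta::FromMicroseconds(
        frame_delay * base::Time::kMicrosecondsPerSecond / output_rate_);

    // A real source would fill |audio_bus| here, adding |last_read_delay_| to
    // any delay it reports upstream; silence keeps the sketch short.
    audio_bus->Zero();
  }

  const int output_rate_;
  base::TimeDelta last_read_delay_;
  MultiChannelResampler resampler_;

  DISALLOW_COPY_AND_ASSIGN(DelayAwareSource);
};

}  // namespace media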