diff options
author | dalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-08-09 21:48:31 +0000 |
---|---|---|
committer | dalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-08-09 21:48:31 +0000 |
commit | 71cdf717853b37392d333bd89636a6a59c609ea2 (patch) | |
tree | b0ccdfbd4781c83c24fa7fe139bff637b044042d /media | |
parent | 88031984f605e614c6269a13197c8cd58ce27b95 (diff) | |
download | chromium_src-71cdf717853b37392d333bd89636a6a59c609ea2.zip chromium_src-71cdf717853b37392d333bd89636a6a59c609ea2.tar.gz chromium_src-71cdf717853b37392d333bd89636a6a59c609ea2.tar.bz2 |
Switch AudioRendererSink::Callback to use AudioBus.
As titled, switches everything over to using the AudioBus
class instead of const std::vector<float*>. Allows removal
of lots of crufty allocations and memsets.
BUG=114700
TEST=unit tests, layout tests, try bots. Nothing should change.
Review URL: https://chromiumcodereview.appspot.com/10823175
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@150906 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
22 files changed, 163 insertions, 259 deletions
diff --git a/media/audio/audio_device_thread.cc b/media/audio/audio_device_thread.cc index 83eb0c4..3e14d3d 100644 --- a/media/audio/audio_device_thread.cc +++ b/media/audio/audio_device_thread.cc @@ -13,6 +13,7 @@ #include "base/threading/platform_thread.h" #include "base/threading/thread_restrictions.h" #include "media/audio/audio_util.h" +#include "media/base/audio_bus.h" using base::PlatformThread; @@ -188,23 +189,17 @@ AudioDeviceThread::Callback::Callback( CHECK_NE(samples_per_ms_, 0); } -AudioDeviceThread::Callback::~Callback() { - for (size_t i = 0; i < audio_data_.size(); ++i) - base::AlignedFree(audio_data_[i]); -} +AudioDeviceThread::Callback::~Callback() {} void AudioDeviceThread::Callback::InitializeOnAudioThread() { - DCHECK(audio_data_.empty()); + DCHECK(!audio_bus_.get()); MapSharedMemory(); DCHECK(shared_memory_.memory() != NULL); - // Allocate buffer with a 16-byte alignment to allow SSE optimizations. - audio_data_.reserve(audio_parameters_.channels()); - for (int i = 0; i < audio_parameters_.channels(); ++i) { - audio_data_.push_back(static_cast<float*>(base::AlignedAlloc( - sizeof(float) * audio_parameters_.frames_per_buffer(), 16))); - } + // TODO(dalecurtis): Instead of creating a new AudioBus and memcpy'ing into + // the shared memory we should wrap the shared memory. + audio_bus_ = AudioBus::Create(audio_parameters_); } } // namespace media. 
diff --git a/media/audio/audio_device_thread.h b/media/audio/audio_device_thread.h index a50339a..d229613 100644 --- a/media/audio/audio_device_thread.h +++ b/media/audio/audio_device_thread.h @@ -5,10 +5,9 @@ #ifndef MEDIA_AUDIO_AUDIO_DEVICE_THREAD_H_ #define MEDIA_AUDIO_AUDIO_DEVICE_THREAD_H_ -#include <vector> - #include "base/basictypes.h" #include "base/memory/ref_counted.h" +#include "base/memory/scoped_ptr.h" #include "base/shared_memory.h" #include "base/sync_socket.h" #include "base/synchronization/lock.h" @@ -18,6 +17,7 @@ class MessageLoop; namespace media { +class AudioBus; // Data transfer between browser and render process uses a combination // of sync sockets and shared memory. To read from the socket and render @@ -60,7 +60,7 @@ class MEDIA_EXPORT AudioDeviceThread { // Audio buffers that are allocated in InitializeOnAudioThread() based on // info from audio_parameters_. - std::vector<float*> audio_data_; + scoped_ptr<AudioBus> audio_bus_; base::SharedMemory shared_memory_; const int memory_length_; diff --git a/media/audio/audio_input_device.cc b/media/audio/audio_input_device.cc index 401042a..b1ab6f8 100644 --- a/media/audio/audio_input_device.cc +++ b/media/audio/audio_input_device.cc @@ -10,6 +10,7 @@ #include "base/time.h" #include "media/audio/audio_manager_base.h" #include "media/audio/audio_util.h" +#include "media/base/audio_bus.h" namespace media { @@ -328,24 +329,23 @@ void AudioInputDevice::AudioThreadCallback::Process(int pending_data) { int audio_delay_milliseconds = pending_data / bytes_per_ms_; int16* memory = reinterpret_cast<int16*>(&buffer->audio[0]); - const size_t number_of_frames = audio_parameters_.frames_per_buffer(); const int bytes_per_sample = sizeof(memory[0]); // Deinterleave each channel and convert to 32-bit floating-point // with nominal range -1.0 -> +1.0. 
- for (int channel_index = 0; channel_index < audio_parameters_.channels(); + for (int channel_index = 0; channel_index < audio_bus_->channels(); ++channel_index) { DeinterleaveAudioChannel(memory, - audio_data_[channel_index], - audio_parameters_.channels(), + audio_bus_->channel(channel_index), + audio_bus_->channels(), channel_index, bytes_per_sample, - number_of_frames); + audio_bus_->frames()); } // Deliver captured data to the client in floating point format // and update the audio-delay measurement. - capture_callback_->Capture(audio_data_, number_of_frames, + capture_callback_->Capture(audio_bus_.get(), audio_delay_milliseconds, volume); } diff --git a/media/audio/audio_input_device.h b/media/audio/audio_input_device.h index 42d6869..ddb2535 100644 --- a/media/audio/audio_input_device.h +++ b/media/audio/audio_input_device.h @@ -88,8 +88,7 @@ class MEDIA_EXPORT AudioInputDevice public: class MEDIA_EXPORT CaptureCallback { public: - virtual void Capture(const std::vector<float*>& audio_data, - int number_of_frames, + virtual void Capture(AudioBus* audio_bus, int audio_delay_milliseconds, double volume) = 0; virtual void OnCaptureError() = 0; diff --git a/media/audio/audio_output_device.cc b/media/audio/audio_output_device.cc index eff1a0c..994fa40 100644 --- a/media/audio/audio_output_device.cc +++ b/media/audio/audio_output_device.cc @@ -266,17 +266,18 @@ void AudioOutputDevice::AudioThreadCallback::Process(int pending_data) { TRACE_EVENT0("audio", "AudioOutputDevice::FireRenderCallback"); // Update the audio-delay measurement then ask client to render audio. - size_t num_frames = render_callback_->Render(audio_data_, - audio_parameters_.frames_per_buffer(), audio_delay_milliseconds); + size_t num_frames = render_callback_->Render( + audio_bus_.get(), audio_delay_milliseconds); // Interleave, scale, and clip to int. - // TODO(crogers/vrk): Figure out a way to avoid the float -> int -> float - // conversions that happen in the <audio> and WebRTC scenarios. 
- InterleaveFloatToInt(audio_data_, shared_memory_.memory(), - num_frames, audio_parameters_.bits_per_sample() / 8); + // TODO(dalecurtis): Remove this when we have float everywhere. + InterleaveFloatToInt( + audio_bus_.get(), shared_memory_.memory(), num_frames, + audio_parameters_.bits_per_sample() / 8); // Let the host know we are done. - SetActualDataSizeInBytes(&shared_memory_, memory_length_, + SetActualDataSizeInBytes( + &shared_memory_, memory_length_, num_frames * audio_parameters_.GetBytesPerFrame()); } diff --git a/media/audio/audio_output_device_unittest.cc b/media/audio/audio_output_device_unittest.cc index 8133f34..152e958 100644 --- a/media/audio/audio_output_device_unittest.cc +++ b/media/audio/audio_output_device_unittest.cc @@ -35,9 +35,7 @@ class MockRenderCallback : public AudioRendererSink::RenderCallback { MockRenderCallback() {} virtual ~MockRenderCallback() {} - MOCK_METHOD3(Render, int(const std::vector<float*>& audio_data, - int number_of_frames, - int audio_delay_milliseconds)); + MOCK_METHOD2(Render, int(AudioBus* audio_bus, int audio_delay_milliseconds)); MOCK_METHOD0(OnRenderError, void()); }; @@ -87,19 +85,6 @@ ACTION_P(QuitLoop, loop) { loop->PostTask(FROM_HERE, MessageLoop::QuitClosure()); } -// Zeros out |number_of_frames| in all channel buffers pointed to by -// the |audio_data| vector. -void ZeroAudioData(int number_of_frames, - const std::vector<float*>& audio_data) { - std::vector<float*>::const_iterator it = audio_data.begin(); - for (; it != audio_data.end(); ++it) { - float* channel = *it; - for (int j = 0; j < number_of_frames; ++j) { - channel[j] = 0.0f; - } - } -} - } // namespace. class AudioOutputDeviceTest : public testing::Test { @@ -222,21 +207,10 @@ TEST_F(AudioOutputDeviceTest, CreateStream) { // So, for the sake of this test, we consider the call to Render a sign // of success and quit the loop. 
- // A note on the call to ZeroAudioData(): - // Valgrind caught a bug in AudioOutputDevice::AudioThreadCallback::Process() - // whereby we always interleaved all the frames in the buffer regardless - // of how many were actually rendered. So to keep the benefits of that - // test, we explicitly pass 0 in here as the number of frames to - // ZeroAudioData(). Other tests might want to pass the requested number - // by using WithArgs<1, 0>(Invoke(&ZeroAudioData)) and set the return - // value accordingly. const int kNumberOfFramesToProcess = 0; - EXPECT_CALL(callback_, Render(_, _, _)) + EXPECT_CALL(callback_, Render(_, _)) .WillOnce(DoAll( - WithArgs<0>(Invoke( - testing::CreateFunctor(&ZeroAudioData, - kNumberOfFramesToProcess))), QuitLoop(io_loop_.message_loop_proxy()), Return(kNumberOfFramesToProcess))); diff --git a/media/audio/audio_util.cc b/media/audio/audio_util.cc index a8ce2b5..d54f277 100644 --- a/media/audio/audio_util.cc +++ b/media/audio/audio_util.cc @@ -22,6 +22,7 @@ #include "base/time.h" #include "media/audio/audio_parameters.h" #include "media/audio/audio_util.h" +#include "media/base/audio_bus.h" #if defined(OS_MACOSX) #include "media/audio/mac/audio_low_latency_input_mac.h" @@ -230,7 +231,7 @@ bool DeinterleaveAudioChannel(void* source, // |Format| is the destination type, |Fixed| is a type larger than |Format| // such that operations can be made without overflowing. 
template<class Format, class Fixed> -static void InterleaveFloatToInt(const std::vector<float*>& source, +static void InterleaveFloatToInt(const AudioBus* source, void* dst_bytes, size_t number_of_frames) { Format* destination = reinterpret_cast<Format*>(dst_bytes); Fixed max_value = std::numeric_limits<Format>::max(); @@ -243,9 +244,9 @@ static void InterleaveFloatToInt(const std::vector<float*>& source, min_value = -(bias - 1); } - int channels = source.size(); + int channels = source->channels(); for (int i = 0; i < channels; ++i) { - float* channel_data = source[i]; + const float* channel_data = source->channel(i); for (size_t j = 0; j < number_of_frames; ++j) { Fixed sample = max_value * channel_data[j]; if (sample > max_value) @@ -258,7 +259,7 @@ static void InterleaveFloatToInt(const std::vector<float*>& source, } } -void InterleaveFloatToInt(const std::vector<float*>& source, void* dst, +void InterleaveFloatToInt(const AudioBus* source, void* dst, size_t number_of_frames, int bytes_per_sample) { switch (bytes_per_sample) { case 1: diff --git a/media/audio/audio_util.h b/media/audio/audio_util.h index 4ac0ef6..d25fdf9 100644 --- a/media/audio/audio_util.h +++ b/media/audio/audio_util.h @@ -6,7 +6,6 @@ #define MEDIA_AUDIO_AUDIO_UTIL_H_ #include <string> -#include <vector> #include "base/basictypes.h" #include "media/base/channel_layout.h" @@ -17,6 +16,7 @@ class SharedMemory; } namespace media { +class AudioBus; // For all audio functions 3 audio formats are supported: // 8 bits unsigned 0 to 255. @@ -86,7 +86,7 @@ MEDIA_EXPORT bool DeinterleaveAudioChannel(void* source, // The size of the |source| vector determines the number of channels. // The |destination| buffer is assumed to be large enough to hold the // result. 
Thus it must be at least size: number_of_frames * source.size() -MEDIA_EXPORT void InterleaveFloatToInt(const std::vector<float*>& source, +MEDIA_EXPORT void InterleaveFloatToInt(const AudioBus* audio_bus, void* destination, size_t number_of_frames, int bytes_per_sample); diff --git a/media/audio/null_audio_sink.cc b/media/audio/null_audio_sink.cc index d98aa63..eab0479 100644 --- a/media/audio/null_audio_sink.cc +++ b/media/audio/null_audio_sink.cc @@ -24,15 +24,11 @@ void NullAudioSink::Initialize(const AudioParameters& params, DCHECK(!initialized_); params_ = params; - audio_data_.reserve(params.channels()); - for (int i = 0; i < params.channels(); ++i) { - float* channel_data = new float[params.frames_per_buffer()]; - audio_data_.push_back(channel_data); - } + audio_bus_ = AudioBus::Create(params_); if (hash_audio_for_testing_) { - md5_channel_contexts_.reset(new base::MD5Context[params.channels()]); - for (int i = 0; i < params.channels(); i++) + md5_channel_contexts_.reset(new base::MD5Context[params_.channels()]); + for (int i = 0; i < params_.channels(); i++) base::MD5Init(&md5_channel_contexts_[i]); } @@ -73,8 +69,6 @@ void NullAudioSink::SetPlaying(bool is_playing) { NullAudioSink::~NullAudioSink() { DCHECK(!thread_.IsRunning()); - for (size_t i = 0; i < audio_data_.size(); ++i) - delete [] audio_data_[i]; } void NullAudioSink::FillBufferTask() { @@ -83,16 +77,15 @@ void NullAudioSink::FillBufferTask() { base::TimeDelta delay; // Only consume buffers when actually playing. 
if (playing_) { - int requested_frames = params_.frames_per_buffer(); - int frames_received = callback_->Render(audio_data_, requested_frames, 0); + int frames_received = callback_->Render(audio_bus_.get(), 0); int frames_per_millisecond = params_.sample_rate() / base::Time::kMillisecondsPerSecond; if (hash_audio_for_testing_ && frames_received > 0) { DCHECK_EQ(sizeof(float), sizeof(uint32)); - int channels = audio_data_.size(); + int channels = audio_bus_->channels(); for (int channel_idx = 0; channel_idx < channels; ++channel_idx) { - float* channel = audio_data_[channel_idx]; + float* channel = audio_bus_->channel(channel_idx); for (int frame_idx = 0; frame_idx < frames_received; frame_idx++) { // Convert float to uint32 w/o conversion loss. uint32 frame = base::ByteSwapToLE32( @@ -135,7 +128,7 @@ std::string NullAudioSink::GetAudioHashForTesting() { // Hash all channels into the first channel. base::MD5Digest digest; - for (size_t i = 1; i < audio_data_.size(); i++) { + for (int i = 1; i < audio_bus_->channels(); i++) { base::MD5Final(&digest, &md5_channel_contexts_[i]); base::MD5Update(&md5_channel_contexts_[0], base::StringPiece( reinterpret_cast<char*>(&digest), sizeof(base::MD5Digest))); diff --git a/media/audio/null_audio_sink.h b/media/audio/null_audio_sink.h index db32df0..c528473 100644 --- a/media/audio/null_audio_sink.h +++ b/media/audio/null_audio_sink.h @@ -13,13 +13,13 @@ // audio device or we haven't written an audio implementation for a particular // platform yet. -#include <vector> - #include "base/md5.h" +#include "base/memory/scoped_ptr.h" #include "base/threading/thread.h" #include "media/base/audio_renderer_sink.h" namespace media { +class AudioBus; class MEDIA_EXPORT NullAudioSink : NON_EXPORTED_BASE(public AudioRendererSink) { @@ -53,7 +53,7 @@ class MEDIA_EXPORT NullAudioSink void SetPlaying(bool is_playing); // A buffer passed to FillBuffer to advance playback. 
- std::vector<float*> audio_data_; + scoped_ptr<AudioBus> audio_bus_; AudioParameters params_; bool initialized_; diff --git a/media/base/audio_renderer_mixer.cc b/media/base/audio_renderer_mixer.cc index 1ca2f39..503f139 100644 --- a/media/base/audio_renderer_mixer.cc +++ b/media/base/audio_renderer_mixer.cc @@ -46,11 +46,6 @@ AudioRendererMixer::~AudioRendererMixer() { // AudioRendererSinks must be stopped before being destructed. audio_sink_->Stop(); - // Clean up |mixer_input_audio_data_|. - for (size_t i = 0; i < mixer_input_audio_data_.size(); ++i) - base::AlignedFree(mixer_input_audio_data_[i]); - mixer_input_audio_data_.clear(); - // Ensures that all mixer inputs have stopped themselves prior to destruction // and have called RemoveMixerInput(). DCHECK_EQ(mixer_inputs_.size(), 0U); @@ -68,45 +63,40 @@ void AudioRendererMixer::RemoveMixerInput( mixer_inputs_.erase(input); } -int AudioRendererMixer::Render(const std::vector<float*>& audio_data, - int number_of_frames, +int AudioRendererMixer::Render(AudioBus* audio_bus, int audio_delay_milliseconds) { current_audio_delay_milliseconds_ = audio_delay_milliseconds; if (resampler_.get()) - resampler_->Resample(audio_data, number_of_frames); + resampler_->Resample(audio_bus, audio_bus->frames()); else - ProvideInput(audio_data, number_of_frames); + ProvideInput(audio_bus); // Always return the full number of frames requested, ProvideInput() will pad // with silence if it wasn't able to acquire enough data. - return number_of_frames; + return audio_bus->frames(); } -void AudioRendererMixer::ProvideInput(const std::vector<float*>& audio_data, - int number_of_frames) { +void AudioRendererMixer::ProvideInput(AudioBus* audio_bus) { base::AutoLock auto_lock(mixer_inputs_lock_); // Allocate staging area for each mixer input's audio data on first call. We - // won't know how much to allocate until here because of resampling. 
- if (mixer_input_audio_data_.size() == 0) { - mixer_input_audio_data_.reserve(audio_data.size()); - for (size_t i = 0; i < audio_data.size(); ++i) { - // Allocate audio data with a 16-byte alignment for SSE optimizations. - mixer_input_audio_data_.push_back(static_cast<float*>( - base::AlignedAlloc(sizeof(float) * number_of_frames, 16))); - } - mixer_input_audio_data_size_ = number_of_frames; + // won't know how much to allocate until here because of resampling. Ensure + // our intermediate AudioBus is sized exactly as the original. Resize should + // only happen once due to the way the resampler works. + if (!mixer_input_audio_bus_.get() || + mixer_input_audio_bus_->frames() != audio_bus->frames()) { + mixer_input_audio_bus_ = + AudioBus::Create(audio_bus->channels(), audio_bus->frames()); } // Sanity check our inputs. - DCHECK_LE(number_of_frames, mixer_input_audio_data_size_); - DCHECK_EQ(audio_data.size(), mixer_input_audio_data_.size()); + DCHECK_EQ(audio_bus->frames(), mixer_input_audio_bus_->frames()); + DCHECK_EQ(audio_bus->channels(), mixer_input_audio_bus_->channels()); - // Zero |audio_data| so we're mixing into a clean buffer and return silence if + // Zero |audio_bus| so we're mixing into a clean buffer and return silence if // we couldn't get enough data from our inputs. - for (size_t i = 0; i < audio_data.size(); ++i) - memset(audio_data[i], 0, number_of_frames * sizeof(*audio_data[i])); + audio_bus->Zero(); // Have each mixer render its data into an output buffer then mix the result. 
for (AudioRendererMixerInputSet::iterator it = mixer_inputs_.begin(); @@ -121,15 +111,14 @@ void AudioRendererMixer::ProvideInput(const std::vector<float*>& audio_data, continue; int frames_filled = input->callback()->Render( - mixer_input_audio_data_, number_of_frames, - current_audio_delay_milliseconds_); + mixer_input_audio_bus_.get(), current_audio_delay_milliseconds_); if (frames_filled == 0) continue; - // Volume adjust and mix each mixer input into |audio_data| after rendering. - for (size_t j = 0; j < audio_data.size(); ++j) { - VectorFMAC( - mixer_input_audio_data_[j], volume, frames_filled, audio_data[j]); + // Volume adjust and mix each mixer input into |audio_bus| after rendering. + for (int i = 0; i < audio_bus->channels(); ++i) { + VectorFMAC(mixer_input_audio_bus_->channel(i), volume, frames_filled, + audio_bus->channel(i)); } // No need to clamp values as InterleaveFloatToInt() will take care of this diff --git a/media/base/audio_renderer_mixer.h b/media/base/audio_renderer_mixer.h index d293799..75b7d85 100644 --- a/media/base/audio_renderer_mixer.h +++ b/media/base/audio_renderer_mixer.h @@ -6,7 +6,6 @@ #define MEDIA_BASE_AUDIO_RENDERER_MIXER_H_ #include <set> -#include <vector> #include "base/gtest_prod_util.h" #include "base/synchronization/lock.h" @@ -38,16 +37,14 @@ class MEDIA_EXPORT AudioRendererMixer FRIEND_TEST_ALL_PREFIXES(AudioRendererMixerTest, VectorFMACBenchmark); // AudioRendererSink::RenderCallback implementation. - virtual int Render(const std::vector<float*>& audio_data, - int number_of_frames, + virtual int Render(AudioBus* audio_bus, int audio_delay_milliseconds) OVERRIDE; virtual void OnRenderError() OVERRIDE; - // Handles mixing and volume adjustment. Renders |number_of_frames| into - // |audio_data|. When resampling is necessary, ProvideInput() will be called + // Handles mixing and volume adjustment. Fully fills |audio_bus| with mixed + // audio data. 
When resampling is necessary, ProvideInput() will be called // by MultiChannelResampler when more data is necessary. - void ProvideInput(const std::vector<float*>& audio_data, - int number_of_frames); + void ProvideInput(AudioBus* audio_bus); // Multiply each element of |src| (up to |len|) by |scale| and add to |dest|. static void VectorFMAC(const float src[], float scale, int len, float dest[]); @@ -68,8 +65,7 @@ class MEDIA_EXPORT AudioRendererMixer base::Lock mixer_inputs_lock_; // Vector for rendering audio data from each mixer input. - int mixer_input_audio_data_size_; - std::vector<float*> mixer_input_audio_data_; + scoped_ptr<AudioBus> mixer_input_audio_bus_; // Handles resampling post-mixing. scoped_ptr<MultiChannelResampler> resampler_; diff --git a/media/base/audio_renderer_mixer_unittest.cc b/media/base/audio_renderer_mixer_unittest.cc index 8394c01..35917b8 100644 --- a/media/base/audio_renderer_mixer_unittest.cc +++ b/media/base/audio_renderer_mixer_unittest.cc @@ -191,19 +191,8 @@ class AudioRendererMixerTest input_parameters_, output_parameters_, sink_)); mixer_callback_ = sink_->callback(); - // TODO(dalecurtis): If we switch to AVX/SSE optimization, we'll need to - // allocate these on 32-byte boundaries and ensure they're sized % 32 bytes. - audio_data_.reserve(output_parameters_.channels()); - for (int i = 0; i < output_parameters_.channels(); ++i) - audio_data_.push_back(new float[output_parameters_.frames_per_buffer()]); - - // TODO(dalecurtis): If we switch to AVX/SSE optimization, we'll need to - // allocate these on 32-byte boundaries and ensure they're sized % 32 bytes. 
- expected_audio_data_.reserve(output_parameters_.channels()); - for (int i = 0; i < output_parameters_.channels(); ++i) { - expected_audio_data_.push_back( - new float[output_parameters_.frames_per_buffer()]); - } + audio_bus_ = AudioBus::Create(output_parameters_); + expected_audio_bus_ = AudioBus::Create(output_parameters_); // Allocate one callback for generating expected results. double step = kSineCycles / static_cast<double>( @@ -241,13 +230,13 @@ class AudioRendererMixerTest } bool ValidateAudioData(int index, int frames, float scale) { - for (size_t i = 0; i < audio_data_.size(); ++i) { + for (int i = 0; i < audio_bus_->channels(); ++i) { for (int j = index; j < frames; j++) { - double error = fabs( - audio_data_[i][j] - expected_audio_data_[i][j] * scale); + double error = fabs(audio_bus_->channel(i)[j] - + expected_audio_bus_->channel(i)[j] * scale); if (error > epsilon_) { - EXPECT_NEAR( - expected_audio_data_[i][j] * scale, audio_data_[i][j], epsilon_) + EXPECT_NEAR(expected_audio_bus_->channel(i)[j] * scale, + audio_bus_->channel(i)[j], epsilon_) << " i=" << i << ", j=" << j; return false; } @@ -257,8 +246,6 @@ class AudioRendererMixerTest } bool RenderAndValidateAudioData(float scale) { - int request_frames = output_parameters_.frames_per_buffer(); - // Half fill won't be exactly half when resampling since the resampler // will have enough data to fill out more of the buffer based on its // internal buffer and kernel size. So special case some of the checks. @@ -269,19 +256,16 @@ class AudioRendererMixerTest for (size_t i = 0; i < fake_callbacks_.size(); ++i) fake_callbacks_[i]->set_half_fill(true); expected_callback_->set_half_fill(true); - for (size_t i = 0; i < expected_audio_data_.size(); ++i) { - memset(expected_audio_data_[i], 0, - sizeof(*expected_audio_data_[i]) * request_frames); - } + expected_audio_bus_->Zero(); } // Render actual audio data. 
- int frames = mixer_callback_->Render(audio_data_, request_frames, 0); - if (frames != request_frames) + int frames = mixer_callback_->Render(audio_bus_.get(), 0); + if (frames != audio_bus_->frames()) return false; // Render expected audio data (without scaling). - expected_callback_->Render(expected_audio_data_, request_frames, 0); + expected_callback_->Render(expected_audio_bus_.get(), 0); if (half_fill_) { // Verify first half of audio data for both resampling and non-resampling. @@ -296,11 +280,12 @@ class AudioRendererMixerTest } } - // Fill |audio_data_| fully with |value|. + // Fill |audio_bus_| fully with |value|. void FillAudioData(float value) { - for (size_t i = 0; i < audio_data_.size(); ++i) - std::fill(audio_data_[i], - audio_data_[i] + output_parameters_.frames_per_buffer(), value); + for (int i = 0; i < audio_bus_->channels(); ++i) { + std::fill(audio_bus_->channel(i), + audio_bus_->channel(i) + audio_bus_->frames(), value); + } } // Verify silence when mixer inputs are in pre-Start() and post-Start(). @@ -414,26 +399,21 @@ class AudioRendererMixerTest mixer_inputs_[i]->Stop(); } - // Verify we get silence back; fill |audio_data_| before hand to be sure. + // Verify we get silence back; fill |audio_bus_| before hand to be sure. 
FillAudioData(1.0f); EXPECT_TRUE(RenderAndValidateAudioData(0.0f)); } protected: - virtual ~AudioRendererMixerTest() { - for (size_t i = 0; i < audio_data_.size(); ++i) - delete [] audio_data_[i]; - for (size_t i = 0; i < expected_audio_data_.size(); ++i) - delete [] expected_audio_data_[i]; - } + virtual ~AudioRendererMixerTest() {} scoped_refptr<MockAudioRendererSink> sink_; scoped_ptr<AudioRendererMixer> mixer_; AudioRendererSink::RenderCallback* mixer_callback_; AudioParameters input_parameters_; AudioParameters output_parameters_; - std::vector<float*> audio_data_; - std::vector<float*> expected_audio_data_; + scoped_ptr<AudioBus> audio_bus_; + scoped_ptr<AudioBus> expected_audio_bus_; std::vector< scoped_refptr<AudioRendererMixerInput> > mixer_inputs_; ScopedVector<FakeAudioRenderCallback> fake_callbacks_; scoped_ptr<FakeAudioRenderCallback> expected_callback_; diff --git a/media/base/audio_renderer_sink.h b/media/base/audio_renderer_sink.h index 8786fe1..c5f43df 100644 --- a/media/base/audio_renderer_sink.h +++ b/media/base/audio_renderer_sink.h @@ -9,6 +9,7 @@ #include "base/basictypes.h" #include "base/memory/ref_counted.h" #include "media/audio/audio_parameters.h" +#include "media/base/audio_bus.h" #include "media/base/media_export.h" namespace media { @@ -22,14 +23,9 @@ class AudioRendererSink public: class RenderCallback { public: - // Fills entire buffer of length |number_of_frames| but returns actual - // number of frames it got from its source (|number_of_frames| in case of - // continuous stream). That actual number of frames is passed to host - // together with PCM audio data and host is free to use or ignore it. - // TODO(crogers): use base:Callback instead. - virtual int Render(const std::vector<float*>& audio_data, - int number_of_frames, - int audio_delay_milliseconds) = 0; + // Attempts to completely fill all channels of |audio_bus|, returns actual + // number of frames filled. 
+ virtual int Render(AudioBus* audio_bus, int audio_delay_milliseconds) = 0; // Signals an error has occurred. virtual void OnRenderError() = 0; diff --git a/media/base/fake_audio_render_callback.cc b/media/base/fake_audio_render_callback.cc index 15986fd..6adf569 100644 --- a/media/base/fake_audio_render_callback.cc +++ b/media/base/fake_audio_render_callback.cc @@ -19,21 +19,21 @@ FakeAudioRenderCallback::FakeAudioRenderCallback(double step) FakeAudioRenderCallback::~FakeAudioRenderCallback() {} -int FakeAudioRenderCallback::Render(const std::vector<float*>& audio_data, - int number_of_frames, +int FakeAudioRenderCallback::Render(AudioBus* audio_bus, int audio_delay_milliseconds) { + int number_of_frames = audio_bus->frames(); if (half_fill_) number_of_frames /= 2; // Fill first channel with a sine wave. for (int i = 0; i < number_of_frames; ++i) - audio_data[0][i] = sin(2 * M_PI * (x_ + step_ * i)); + audio_bus->channel(0)[i] = sin(2 * M_PI * (x_ + step_ * i)); x_ += number_of_frames * step_; // Copy first channel into the rest of the channels. - for (size_t i = 1; i < audio_data.size(); ++i) - memcpy(audio_data[i], audio_data[0], - number_of_frames * sizeof(*audio_data[0])); + for (int i = 1; i < audio_bus->channels(); ++i) + memcpy(audio_bus->channel(i), audio_bus->channel(0), + number_of_frames * sizeof(*audio_bus->channel(i))); return number_of_frames; } diff --git a/media/base/fake_audio_render_callback.h b/media/base/fake_audio_render_callback.h index b1a4e44..f6ce104 100644 --- a/media/base/fake_audio_render_callback.h +++ b/media/base/fake_audio_render_callback.h @@ -5,8 +5,6 @@ #ifndef MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_ #define MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_ -#include <vector> - #include "media/base/audio_renderer_sink.h" #include "testing/gmock/include/gmock/gmock.h" @@ -24,8 +22,7 @@ class FakeAudioRenderCallback : public AudioRendererSink::RenderCallback { // Renders a sine wave into the provided audio data buffer. 
If |half_fill_| // is set, will only fill half the buffer. - int Render(const std::vector<float*>& audio_data, int number_of_frames, - int audio_delay_milliseconds) OVERRIDE; + int Render(AudioBus* audio_bus, int audio_delay_milliseconds) OVERRIDE; MOCK_METHOD0(OnRenderError, void()); // Toggles only filling half the requested amount during Render(). diff --git a/media/base/multi_channel_resampler.cc b/media/base/multi_channel_resampler.cc index 23ab0eb..960a212 100644 --- a/media/base/multi_channel_resampler.cc +++ b/media/base/multi_channel_resampler.cc @@ -7,6 +7,7 @@ #include "base/bind.h" #include "base/bind_helpers.h" #include "base/logging.h" +#include "media/base/audio_bus.h" namespace media { @@ -14,7 +15,6 @@ MultiChannelResampler::MultiChannelResampler(int channels, double io_sample_rate_ratio, const ReadCB& read_cb) : last_frame_count_(0), - first_frame_count_(0), read_cb_(read_cb) { // Allocate each channel's resampler. resamplers_.reserve(channels); @@ -24,17 +24,10 @@ MultiChannelResampler::MultiChannelResampler(int channels, } } -MultiChannelResampler::~MultiChannelResampler() { - // Clean up |resampler_audio_data_|. Skip the first channel since we never - // allocated that, but just used the destination passed into ProvideInput(). - for (size_t i = 1; i < resampler_audio_data_.size(); ++i) - delete [] resampler_audio_data_[i]; - resampler_audio_data_.clear(); -} +MultiChannelResampler::~MultiChannelResampler() {} -void MultiChannelResampler::Resample(const std::vector<float*>& destination, - int frames) { - DCHECK_EQ(destination.size(), resamplers_.size()); +void MultiChannelResampler::Resample(AudioBus* audio_bus, int frames) { + DCHECK_EQ(static_cast<size_t>(audio_bus->channels()), resamplers_.size()); // We need to ensure that SincResampler only calls ProvideInput once for each // channel. 
To ensure this, we chunk the number of requested frames into @@ -55,7 +48,8 @@ void MultiChannelResampler::Resample(const std::vector<float*>& destination, // the first channel, then it will call it for the remaining channels, // since they all buffer in the same way and are processing the same // number of frames. - resamplers_[i]->Resample(destination[i] + frames_done, frames_this_time); + resamplers_[i]->Resample( + audio_bus->channel(i) + frames_done, frames_this_time); } frames_done += frames_this_time; @@ -66,32 +60,37 @@ void MultiChannelResampler::ProvideInput(int channel, float* destination, int frames) { // Get the data from the multi-channel provider when the first channel asks // for it. For subsequent channels, we can just dish out the channel data - // from that (stored in |resampler_audio_data_|). + // from that (stored in |resampler_audio_bus_|). if (channel == 0) { - // Allocate staging arrays on the first request. - if (resampler_audio_data_.size() == 0) { - first_frame_count_ = frames; - // Skip allocation of the first buffer, since we'll use |destination| - // directly for that. - resampler_audio_data_.reserve(resamplers_.size()); + // Allocate staging arrays on the first request and if the frame size or + // |destination| changes (should only happen once). + if (!resampler_audio_bus_.get() || + resampler_audio_bus_->frames() != frames || + wrapped_resampler_audio_bus_->channel(0) != destination) { + resampler_audio_bus_ = AudioBus::Create(resamplers_.size(), frames); + + // Create a channel vector based on |resampler_audio_bus_| but using + // |destination| directly for the first channel and then wrap it in a new + // AudioBus so we can avoid an extra memcpy later. 
+ resampler_audio_data_.clear(); + resampler_audio_data_.reserve(resampler_audio_bus_->channels()); resampler_audio_data_.push_back(destination); - for (size_t i = 1; i < resamplers_.size(); ++i) - resampler_audio_data_.push_back(new float[frames]); - } else { - DCHECK_LE(frames, first_frame_count_); - resampler_audio_data_[0] = destination; + for (int i = 1; i < resampler_audio_bus_->channels(); ++i) + resampler_audio_data_.push_back(resampler_audio_bus_->channel(i)); + wrapped_resampler_audio_bus_ = AudioBus::WrapVector( + frames, resampler_audio_data_); } last_frame_count_ = frames; - read_cb_.Run(resampler_audio_data_, frames); + read_cb_.Run(wrapped_resampler_audio_bus_.get()); } else { // All channels must ask for the same amount. This should always be the // case, but let's just make sure. DCHECK_EQ(frames, last_frame_count_); // Copy the channel data from what we received from |read_cb_|. - memcpy(destination, resampler_audio_data_[channel], - sizeof(*resampler_audio_data_[channel]) * frames); + memcpy(destination, resampler_audio_bus_->channel(channel), + sizeof(*resampler_audio_bus_->channel(channel)) * frames); } } diff --git a/media/base/multi_channel_resampler.h b/media/base/multi_channel_resampler.h index db95bc5..6d65075 100644 --- a/media/base/multi_channel_resampler.h +++ b/media/base/multi_channel_resampler.h @@ -13,16 +13,16 @@ #include "media/base/sinc_resampler.h" namespace media { +class AudioBus; // MultiChannelResampler is a multi channel wrapper for SincResampler; allowing // high quality sample rate conversion of multiple channels at once. class MEDIA_EXPORT MultiChannelResampler { public: - // Callback type for providing more data into the resampler. Expects |frames| - // of data for all channels to be rendered into |destination|; zero padded if - // not enough frames are available to satisfy the request. 
- typedef base::Callback<void(const std::vector<float*>& destination, - int frames)> ReadCB; + // Callback type for providing more data into the resampler. Expects AudioBus + // to be completely filled with data upon return; zero padded if not enough + // frames are available to satisfy the request. + typedef base::Callback<void(AudioBus* audio_bus)> ReadCB; // Constructs a MultiChannelResampler with the specified |read_cb|, which is // used to acquire audio data for resampling. |io_sample_rate_ratio| is the @@ -31,8 +31,8 @@ class MEDIA_EXPORT MultiChannelResampler { const ReadCB& read_cb); virtual ~MultiChannelResampler(); - // Resample |frames| of data from |read_cb_| into |destination|. - void Resample(const std::vector<float*>& destination, int frames); + // Resamples |frames| of data from |read_cb_| into AudioBus. + void Resample(AudioBus* audio_bus, int frames); private: // SincResampler::ReadCB implementation. ProvideInput() will be called for @@ -43,17 +43,15 @@ class MEDIA_EXPORT MultiChannelResampler { // frames for every channel. int last_frame_count_; - // Sanity check to ensure |resampler_audio_data_| is properly allocated. - int first_frame_count_; - // Source of data for resampling. ReadCB read_cb_; // Each channel has its own high quality resampler. ScopedVector<SincResampler> resamplers_; - // Buffer for audio data going into SincResampler from ReadCB. Owned by this - // class and only temporarily passed out to ReadCB when data is required. + // Buffers for audio data going into SincResampler from ReadCB. 
+ scoped_ptr<AudioBus> resampler_audio_bus_; + scoped_ptr<AudioBus> wrapped_resampler_audio_bus_; std::vector<float*> resampler_audio_data_; }; diff --git a/media/base/multi_channel_resampler_unittest.cc b/media/base/multi_channel_resampler_unittest.cc index 0475d76..d32396e 100644 --- a/media/base/multi_channel_resampler_unittest.cc +++ b/media/base/multi_channel_resampler_unittest.cc @@ -8,6 +8,7 @@ #include "base/bind_helpers.h" #include "base/logging.h" #include "base/memory/scoped_ptr.h" +#include "media/base/audio_bus.h" #include "media/base/multi_channel_resampler.h" #include "testing/gtest/include/gtest/gtest.h" @@ -37,33 +38,20 @@ class MultiChannelResamplerTest : public testing::TestWithParam<int> { public: MultiChannelResamplerTest() {} - virtual ~MultiChannelResamplerTest() { - if (!audio_data_.empty()) { - for (size_t i = 0; i < audio_data_.size(); ++i) - delete [] audio_data_[i]; - audio_data_.clear(); - } - } + virtual ~MultiChannelResamplerTest() {} void InitializeAudioData(int channels, int frames) { frames_ = frames; - audio_data_.reserve(channels); - for (int i = 0; i < channels; ++i) { - audio_data_.push_back(new float[frames]); - - // Zero initialize so we can be sure every value has been provided. - memset(audio_data_[i], 0, sizeof(*audio_data_[i]) * frames); - } + audio_bus_ = AudioBus::Create(channels, frames); } // MultiChannelResampler::MultiChannelAudioSourceProvider implementation, just // fills the provided audio_data with |kFillValue|. 
- virtual void ProvideInput(const std::vector<float*>& audio_data, - int number_of_frames) { - EXPECT_EQ(audio_data.size(), audio_data_.size()); - for (size_t i = 0; i < audio_data.size(); ++i) - for (int j = 0; j < number_of_frames; ++j) - audio_data[i][j] = kFillValue; + virtual void ProvideInput(AudioBus* audio_bus) { + EXPECT_EQ(audio_bus->channels(), audio_bus_->channels()); + for (int i = 0; i < audio_bus->channels(); ++i) + for (int j = 0; j < audio_bus->frames(); ++j) + audio_bus->channel(i)[j] = kFillValue; } void MultiChannelTest(int channels, int frames, double expected_max_rms_error, @@ -73,7 +61,7 @@ class MultiChannelResamplerTest channels, kScaleFactor, base::Bind( &MultiChannelResamplerTest::ProvideInput, base::Unretained(this))); - resampler.Resample(audio_data_, frames); + resampler.Resample(audio_bus_.get(), frames); TestValues(expected_max_rms_error, expected_max_error); } @@ -91,19 +79,19 @@ class MultiChannelResamplerTest // Calculate Root-Mean-Square-Error for the resampling. double max_error = 0.0; double sum_of_squares = 0.0; - for (size_t i = 0; i < audio_data_.size(); ++i) { + for (int i = 0; i < audio_bus_->channels(); ++i) { for (int j = 0; j < frames_; ++j) { // Ensure all values are accounted for. 
- ASSERT_NE(audio_data_[i][j], 0); + ASSERT_NE(audio_bus_->channel(i)[j], 0); - double error = fabs(audio_data_[i][j] - kFillValue); + double error = fabs(audio_bus_->channel(i)[j] - kFillValue); max_error = std::max(max_error, error); sum_of_squares += error * error; } } double rms_error = sqrt( - sum_of_squares / (frames_ * audio_data_.size())); + sum_of_squares / (frames_ * audio_bus_->channels())); EXPECT_LE(rms_error, expected_max_rms_error); EXPECT_LE(max_error, expected_max_error); @@ -111,7 +99,7 @@ class MultiChannelResamplerTest protected: int frames_; - std::vector<float*> audio_data_; + scoped_ptr<AudioBus> audio_bus_; DISALLOW_COPY_AND_ASSIGN(MultiChannelResamplerTest); }; diff --git a/media/base/sinc_resampler.h b/media/base/sinc_resampler.h index 4c55eab..029a38a 100644 --- a/media/base/sinc_resampler.h +++ b/media/base/sinc_resampler.h @@ -81,6 +81,8 @@ class MEDIA_EXPORT SincResampler { float* const r3_; float* const r4_; float* const r5_; + + DISALLOW_COPY_AND_ASSIGN(SincResampler); }; } // namespace media diff --git a/media/filters/audio_renderer_impl.cc b/media/filters/audio_renderer_impl.cc index 50bf16c..384935e 100644 --- a/media/filters/audio_renderer_impl.cc +++ b/media/filters/audio_renderer_impl.cc @@ -325,13 +325,11 @@ bool AudioRendererImpl::IsBeforePrerollTime( (buffer->GetTimestamp() + buffer->GetDuration()) < preroll_timestamp_; } -int AudioRendererImpl::Render(const std::vector<float*>& audio_data, - int number_of_frames, +int AudioRendererImpl::Render(AudioBus* audio_bus, int audio_delay_milliseconds) { if (stopped_ || GetPlaybackRate() == 0.0f) { // Output silence if stopped. 
- for (size_t i = 0; i < audio_data.size(); ++i) - memset(audio_data[i], 0, sizeof(float) * number_of_frames); + audio_bus->Zero(); return 0; } @@ -348,30 +346,29 @@ int AudioRendererImpl::Render(const std::vector<float*>& audio_data, int bytes_per_frame = audio_parameters_.GetBytesPerFrame(); - const int buf_size = number_of_frames * bytes_per_frame; + const int buf_size = audio_bus->frames() * bytes_per_frame; scoped_array<uint8> buf(new uint8[buf_size]); - int frames_filled = FillBuffer(buf.get(), number_of_frames, request_delay); + int frames_filled = FillBuffer(buf.get(), audio_bus->frames(), request_delay); int bytes_filled = frames_filled * bytes_per_frame; DCHECK_LE(bytes_filled, buf_size); UpdateEarliestEndTime(bytes_filled, request_delay, base::Time::Now()); // Deinterleave each audio channel. - int channels = audio_data.size(); + int channels = audio_bus->channels(); for (int channel_index = 0; channel_index < channels; ++channel_index) { media::DeinterleaveAudioChannel(buf.get(), - audio_data[channel_index], + audio_bus->channel(channel_index), channels, channel_index, bytes_per_frame / channels, frames_filled); // If FillBuffer() didn't give us enough data then zero out the remainder. - if (frames_filled < number_of_frames) { - int frames_to_zero = number_of_frames - frames_filled; - memset(audio_data[channel_index] + frames_filled, - 0, - sizeof(float) * frames_to_zero); + if (frames_filled < audio_bus->frames()) { + int frames_to_zero = audio_bus->frames() - frames_filled; + memset(audio_bus->channel(channel_index) + frames_filled, 0, + sizeof(*audio_bus->channel(channel_index)) * frames_to_zero); } } return frames_filled; diff --git a/media/filters/audio_renderer_impl.h b/media/filters/audio_renderer_impl.h index f6c72f9..68f3250 100644 --- a/media/filters/audio_renderer_impl.h +++ b/media/filters/audio_renderer_impl.h @@ -119,8 +119,7 @@ class MEDIA_EXPORT AudioRendererImpl void DoPause(); // media::AudioRendererSink::RenderCallback implementation. 
- virtual int Render(const std::vector<float*>& audio_data, - int number_of_frames, + virtual int Render(AudioBus* audio_bus, int audio_delay_milliseconds) OVERRIDE; virtual void OnRenderError() OVERRIDE; |