diff options
author | xians <xians@chromium.org> | 2014-08-29 08:17:28 -0700 |
---|---|---|
committer | Commit bot <commit-bot@chromium.org> | 2014-08-29 15:18:32 +0000 |
commit | ebae1d3f36f0139ed578e36e21e8ac372e9424f6 (patch) | |
tree | 51d4c00bcc376e4974a242e904562ad5f3ba2f42 /media | |
parent | 22aa58e63b325a6fbdea7f59ce9b0b6e63ac96cc (diff) | |
download | chromium_src-ebae1d3f36f0139ed578e36e21e8ac372e9424f6.zip chromium_src-ebae1d3f36f0139ed578e36e21e8ac372e9424f6.tar.gz chromium_src-ebae1d3f36f0139ed578e36e21e8ac372e9424f6.tar.bz2 |
Used the native deinterleaved floating-point format for the input streams.
If we call GetProperty with kAudioUnitProperty_StreamFormat before setting the format, the device reports kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved as its native format, which is the same format used for the output.
This patch changes the format to use kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved when opening the device, so that we avoid converting the format back and forth. We hope this optimization will improve the stability of audio input on Mac.
TBR=DaleCurtis@chromium.org
BUG=404884
TEST=media_unittests && https://webrtc.googlecode.com/svn-history/r5497/trunk/samples/js/demos/html/pc1.html, https://www.google.com/intl/en/chrome/demos/speech.html
Review URL: https://codereview.chromium.org/510073002
Cr-Commit-Position: refs/heads/master@{#292636}
Diffstat (limited to 'media')
-rw-r--r-- | media/audio/mac/audio_low_latency_input_mac.cc | 221 | ||||
-rw-r--r-- | media/audio/mac/audio_low_latency_input_mac.h | 19 | ||||
-rw-r--r-- | media/base/audio_block_fifo.cc | 46 | ||||
-rw-r--r-- | media/base/audio_block_fifo.h | 7 | ||||
-rw-r--r-- | media/base/audio_block_fifo_unittest.cc | 69 |
5 files changed, 227 insertions, 135 deletions
diff --git a/media/audio/mac/audio_low_latency_input_mac.cc b/media/audio/mac/audio_low_latency_input_mac.cc index f1dbdf7..d3585c9 100644 --- a/media/audio/mac/audio_low_latency_input_mac.cc +++ b/media/audio/mac/audio_low_latency_input_mac.cc @@ -10,6 +10,7 @@ #include "base/logging.h" #include "base/mac/mac_logging.h" #include "media/audio/mac/audio_manager_mac.h" +#include "media/base/audio_block_fifo.h" #include "media/base/audio_bus.h" #include "media/base/data_buffer.h" @@ -31,6 +32,23 @@ static std::ostream& operator<<(std::ostream& os, return os; } +static void WrapBufferList(AudioBufferList* buffer_list, + AudioBus* bus, + int frames) { + DCHECK(buffer_list); + DCHECK(bus); + const int channels = bus->channels(); + const int buffer_list_channels = buffer_list->mNumberBuffers; + CHECK_EQ(channels, buffer_list_channels); + + // Copy pointers from AudioBufferList. + for (int i = 0; i < channels; ++i) + bus->SetChannelData(i, static_cast<float*>(buffer_list->mBuffers[i].mData)); + + // Finally set the actual length. + bus->set_frames(frames); +} + // See "Technical Note TN2091 - Device input using the HAL Output Audio Unit" // http://developer.apple.com/library/mac/#technotes/tn2091/_index.html // for more details and background regarding this implementation. @@ -46,43 +64,46 @@ AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager, started_(false), hardware_latency_frames_(0), number_of_channels_in_frame_(0), - fifo_(input_params.channels(), - number_of_frames_, - kNumberOfBlocksBufferInFifo) { + audio_wrapper_(AudioBus::CreateWrapper(input_params.channels())) { DCHECK(manager_); // Set up the desired (output) format specified by the client. 
format_.mSampleRate = input_params.sample_rate(); format_.mFormatID = kAudioFormatLinearPCM; - format_.mFormatFlags = kLinearPCMFormatFlagIsPacked | - kLinearPCMFormatFlagIsSignedInteger; - format_.mBitsPerChannel = input_params.bits_per_sample(); + format_.mFormatFlags = + kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved; + size_t bytes_per_sample = sizeof(Float32); + format_.mBitsPerChannel = bytes_per_sample * 8; format_.mChannelsPerFrame = input_params.channels(); - format_.mFramesPerPacket = 1; // uncompressed audio - format_.mBytesPerPacket = (format_.mBitsPerChannel * - input_params.channels()) / 8; - format_.mBytesPerFrame = format_.mBytesPerPacket; + format_.mFramesPerPacket = 1; + format_.mBytesPerFrame = bytes_per_sample; + format_.mBytesPerPacket = format_.mBytesPerFrame * format_.mFramesPerPacket; format_.mReserved = 0; DVLOG(1) << "Desired ouput format: " << format_; - // Derive size (in bytes) of the buffers that we will render to. - UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame; - DVLOG(1) << "Size of data buffer in bytes : " << data_byte_size; + // Allocate AudioBufferList based on the number of channels. + audio_buffer_list_.reset(static_cast<AudioBufferList*>( + malloc(sizeof(AudioBufferList) * input_params.channels()))); + audio_buffer_list_->mNumberBuffers = input_params.channels(); // Allocate AudioBuffers to be used as storage for the received audio. // The AudioBufferList structure works as a placeholder for the // AudioBuffer structure, which holds a pointer to the actual data buffer. 
- audio_data_buffer_.reset(new uint8[data_byte_size]); - audio_buffer_list_.mNumberBuffers = 1; - - AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers; - audio_buffer->mNumberChannels = input_params.channels(); - audio_buffer->mDataByteSize = data_byte_size; - audio_buffer->mData = audio_data_buffer_.get(); + UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame; + audio_data_buffer_.reset(static_cast<float*>(base::AlignedAlloc( + data_byte_size * audio_buffer_list_->mNumberBuffers, + AudioBus::kChannelAlignment))); + AudioBuffer* audio_buffer = audio_buffer_list_->mBuffers; + for (UInt32 i = 0; i < audio_buffer_list_->mNumberBuffers; ++i) { + audio_buffer[i].mNumberChannels = 1; + audio_buffer[i].mDataByteSize = data_byte_size; + audio_buffer[i].mData = audio_data_buffer_.get() + i * data_byte_size; + } } -AUAudioInputStream::~AUAudioInputStream() {} +AUAudioInputStream::~AUAudioInputStream() { +} // Obtain and open the AUHAL AudioOutputUnit for recording. bool AUAudioInputStream::Open() { @@ -165,23 +186,6 @@ bool AUAudioInputStream::Open() { return false; } - // Register the input procedure for the AUHAL. - // This procedure will be called when the AUHAL has received new data - // from the input device. - AURenderCallbackStruct callback; - callback.inputProc = InputProc; - callback.inputProcRefCon = this; - result = AudioUnitSetProperty(audio_unit_, - kAudioOutputUnitProperty_SetInputCallback, - kAudioUnitScope_Global, - 0, - &callback, - sizeof(callback)); - if (result) { - HandleError(result); - return false; - } - // Set up the the desired (output) format. // For obtaining input from a device, the device format is always expressed // on the output scope of the AUHAL's Element 1. @@ -229,6 +233,23 @@ bool AUAudioInputStream::Open() { } } + // Register the input procedure for the AUHAL. + // This procedure will be called when the AUHAL has received new data + // from the input device. 
+ AURenderCallbackStruct callback; + callback.inputProc = InputProc; + callback.inputProcRefCon = this; + result = AudioUnitSetProperty(audio_unit_, + kAudioOutputUnitProperty_SetInputCallback, + kAudioUnitScope_Global, + 0, + &callback, + sizeof(callback)); + if (result) { + HandleError(result); + return false; + } + // Finally, initialize the audio unit and ensure that it is ready to render. // Allocates memory according to the maximum number of audio frames // it can produce in response to a single render call. @@ -342,9 +363,9 @@ void AUAudioInputStream::SetVolume(double volume) { Float32 volume_float32 = static_cast<Float32>(volume); AudioObjectPropertyAddress property_address = { - kAudioDevicePropertyVolumeScalar, - kAudioDevicePropertyScopeInput, - kAudioObjectPropertyElementMaster + kAudioDevicePropertyVolumeScalar, + kAudioDevicePropertyScopeInput, + kAudioObjectPropertyElementMaster }; // Try to set the volume for master volume channel. @@ -390,15 +411,15 @@ void AUAudioInputStream::SetVolume(double volume) { double AUAudioInputStream::GetVolume() { // Verify that we have a valid device. - if (input_device_id_ == kAudioObjectUnknown){ + if (input_device_id_ == kAudioObjectUnknown) { NOTREACHED() << "Device ID is unknown"; return 0.0; } AudioObjectPropertyAddress property_address = { - kAudioDevicePropertyVolumeScalar, - kAudioDevicePropertyScopeInput, - kAudioObjectPropertyElementMaster + kAudioDevicePropertyVolumeScalar, + kAudioDevicePropertyScopeInput, + kAudioObjectPropertyElementMaster }; if (AudioObjectHasProperty(input_device_id_, &property_address)) { @@ -406,12 +427,8 @@ double AUAudioInputStream::GetVolume() { // master channel. 
Float32 volume_float32 = 0.0; UInt32 size = sizeof(volume_float32); - OSStatus result = AudioObjectGetPropertyData(input_device_id_, - &property_address, - 0, - NULL, - &size, - &volume_float32); + OSStatus result = AudioObjectGetPropertyData( + input_device_id_, &property_address, 0, NULL, &size, &volume_float32); if (result == noErr) return static_cast<double>(volume_float32); } else { @@ -472,9 +489,8 @@ OSStatus AUAudioInputStream::InputProc(void* user_data, return result; // Deliver recorded data to the consumer as a callback. - return audio_input->Provide(number_of_frames, - audio_input->audio_buffer_list(), - time_stamp); + return audio_input->Provide( + number_of_frames, audio_input->audio_buffer_list(), time_stamp); } OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames, @@ -491,22 +507,42 @@ OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames, AudioBuffer& buffer = io_data->mBuffers[0]; uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData); - uint32 capture_delay_bytes = static_cast<uint32> - ((capture_latency_frames + 0.5) * format_.mBytesPerFrame); + uint32 capture_delay_bytes = static_cast<uint32>( + (capture_latency_frames + 0.5) * format_.mBytesPerFrame); DCHECK(audio_data); if (!audio_data) return kAudioUnitErr_InvalidElement; - // Copy captured (and interleaved) data into FIFO. - fifo_.Push(audio_data, number_of_frames, format_.mBitsPerChannel / 8); + // Wrap the output AudioBufferList to |audio_wrapper_|. + WrapBufferList(io_data, audio_wrapper_.get(), number_of_frames); + + // If the stream parameters change for any reason, we need to insert a FIFO + // since the OnMoreData() pipeline can't handle frame size changes. + if (number_of_frames != number_of_frames_) { + // Create a FIFO on the fly to handle any discrepancies in callback rates. 
+ if (!fifo_) { + fifo_.reset(new AudioBlockFifo(audio_wrapper_->channels(), + number_of_frames_, + kNumberOfBlocksBufferInFifo)); + } + } + // When FIFO does not kick in, data will be directly passed to the callback. + if (!fifo_) { + CHECK_EQ(audio_wrapper_->frames(), static_cast<int>(number_of_frames_)); + sink_->OnData( + this, audio_wrapper_.get(), capture_delay_bytes, normalized_volume); + return noErr; + } + + // Compensate the audio delay caused by the FIFO. + capture_delay_bytes += fifo_->GetAvailableFrames() * format_.mBytesPerFrame; + + fifo_->Push(audio_wrapper_.get()); // Consume and deliver the data when the FIFO has a block of available data. - while (fifo_.available_blocks()) { - const AudioBus* audio_bus = fifo_.Consume(); + while (fifo_->available_blocks()) { + const AudioBus* audio_bus = fifo_->Consume(); DCHECK_EQ(audio_bus->frames(), static_cast<int>(number_of_frames_)); - - // Compensate the audio delay caused by the FIFO. - capture_delay_bytes += fifo_.GetAvailableFrames() * format_.mBytesPerFrame; sink_->OnData(this, audio_bus, capture_delay_bytes, normalized_volume); } @@ -519,9 +555,9 @@ int AUAudioInputStream::HardwareSampleRate() { UInt32 info_size = sizeof(device_id); AudioObjectPropertyAddress default_input_device_address = { - kAudioHardwarePropertyDefaultInputDevice, - kAudioObjectPropertyScopeGlobal, - kAudioObjectPropertyElementMaster + kAudioHardwarePropertyDefaultInputDevice, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster }; OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &default_input_device_address, @@ -536,10 +572,8 @@ int AUAudioInputStream::HardwareSampleRate() { info_size = sizeof(nominal_sample_rate); AudioObjectPropertyAddress nominal_sample_rate_address = { - kAudioDevicePropertyNominalSampleRate, - kAudioObjectPropertyScopeGlobal, - kAudioObjectPropertyElementMaster - }; + kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, + 
kAudioObjectPropertyElementMaster}; result = AudioObjectGetPropertyData(device_id, &nominal_sample_rate_address, 0, @@ -572,9 +606,9 @@ double AUAudioInputStream::GetHardwareLatency() { // Get input audio device latency. AudioObjectPropertyAddress property_address = { - kAudioDevicePropertyLatency, - kAudioDevicePropertyScopeInput, - kAudioObjectPropertyElementMaster + kAudioDevicePropertyLatency, + kAudioDevicePropertyScopeInput, + kAudioObjectPropertyElementMaster }; UInt32 device_latency_frames = 0; size = sizeof(device_latency_frames); @@ -586,19 +620,19 @@ double AUAudioInputStream::GetHardwareLatency() { &device_latency_frames); DLOG_IF(WARNING, result != noErr) << "Could not get audio device latency."; - return static_cast<double>((audio_unit_latency_sec * - format_.mSampleRate) + device_latency_frames); + return static_cast<double>((audio_unit_latency_sec * format_.mSampleRate) + + device_latency_frames); } double AUAudioInputStream::GetCaptureLatency( const AudioTimeStamp* input_time_stamp) { // Get the delay between between the actual recording instant and the time // when the data packet is provided as a callback. - UInt64 capture_time_ns = AudioConvertHostTimeToNanos( - input_time_stamp->mHostTime); + UInt64 capture_time_ns = + AudioConvertHostTimeToNanos(input_time_stamp->mHostTime); UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); - double delay_frames = static_cast<double> - (1e-9 * (now_ns - capture_time_ns) * format_.mSampleRate); + double delay_frames = static_cast<double>(1e-9 * (now_ns - capture_time_ns) * + format_.mSampleRate); // Total latency is composed by the dynamic latency and the fixed // hardware latency. @@ -608,18 +642,14 @@ double AUAudioInputStream::GetCaptureLatency( int AUAudioInputStream::GetNumberOfChannelsFromStream() { // Get the stream format, to be able to read the number of channels. 
AudioObjectPropertyAddress property_address = { - kAudioDevicePropertyStreamFormat, - kAudioDevicePropertyScopeInput, - kAudioObjectPropertyElementMaster + kAudioDevicePropertyStreamFormat, + kAudioDevicePropertyScopeInput, + kAudioObjectPropertyElementMaster }; AudioStreamBasicDescription stream_format; UInt32 size = sizeof(stream_format); - OSStatus result = AudioObjectGetPropertyData(input_device_id_, - &property_address, - 0, - NULL, - &size, - &stream_format); + OSStatus result = AudioObjectGetPropertyData( + input_device_id_, &property_address, 0, NULL, &size, &stream_format); if (result != noErr) { DLOG(WARNING) << "Could not get stream format"; return 0; @@ -629,8 +659,8 @@ int AUAudioInputStream::GetNumberOfChannelsFromStream() { } void AUAudioInputStream::HandleError(OSStatus err) { - NOTREACHED() << "error " << GetMacOSStatusErrorString(err) - << " (" << err << ")"; + NOTREACHED() << "error " << GetMacOSStatusErrorString(err) << " (" << err + << ")"; if (sink_) sink_->OnError(this); } @@ -638,13 +668,12 @@ void AUAudioInputStream::HandleError(OSStatus err) { bool AUAudioInputStream::IsVolumeSettableOnChannel(int channel) { Boolean is_settable = false; AudioObjectPropertyAddress property_address = { - kAudioDevicePropertyVolumeScalar, - kAudioDevicePropertyScopeInput, - static_cast<UInt32>(channel) + kAudioDevicePropertyVolumeScalar, + kAudioDevicePropertyScopeInput, + static_cast<UInt32>(channel) }; - OSStatus result = AudioObjectIsPropertySettable(input_device_id_, - &property_address, - &is_settable); + OSStatus result = AudioObjectIsPropertySettable( + input_device_id_, &property_address, &is_settable); return (result == noErr) ? 
is_settable : false; } diff --git a/media/audio/mac/audio_low_latency_input_mac.h b/media/audio/mac/audio_low_latency_input_mac.h index c8e43fa..53d8cb4 100644 --- a/media/audio/mac/audio_low_latency_input_mac.h +++ b/media/audio/mac/audio_low_latency_input_mac.h @@ -45,10 +45,10 @@ #include "media/audio/agc_audio_stream.h" #include "media/audio/audio_io.h" #include "media/audio/audio_parameters.h" -#include "media/base/audio_block_fifo.h" namespace media { +class AudioBlockFifo; class AudioBus; class AudioManagerMac; class DataBuffer; @@ -78,7 +78,7 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> { bool started() const { return started_; } AudioUnit audio_unit() { return audio_unit_; } - AudioBufferList* audio_buffer_list() { return &audio_buffer_list_; } + AudioBufferList* audio_buffer_list() { return audio_buffer_list_.get(); } private: // AudioOutputUnit callback. @@ -90,7 +90,8 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> { AudioBufferList* io_data); // Pushes recorded data to consumer of the input audio stream. - OSStatus Provide(UInt32 number_of_frames, AudioBufferList* io_data, + OSStatus Provide(UInt32 number_of_frames, + AudioBufferList* io_data, const AudioTimeStamp* time_stamp); // Gets the fixed capture hardware latency and store it during initialization. @@ -132,11 +133,11 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> { AudioDeviceID input_device_id_; // Provides a mechanism for encapsulating one or more buffers of audio data. - AudioBufferList audio_buffer_list_; + scoped_ptr<AudioBufferList, base::FreeDeleter> audio_buffer_list_; // Temporary storage for recorded data. The InputProc() renders into this // array as soon as a frame of the desired buffer size has been recorded. - scoped_ptr<uint8[]> audio_data_buffer_; + scoped_ptr<float, base::AlignedFreeDeleter> audio_data_buffer_; // True after successfull Start(), false after successful Stop(). 
bool started_; @@ -148,8 +149,12 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> { // when querying the volume of each channel. int number_of_channels_in_frame_; - // FIFO used to accumulates recorded data. - media::AudioBlockFifo fifo_; + // Dynamically allocated FIFO used when CoreAudio asks for unexpected frame + // sizes. + scoped_ptr<AudioBlockFifo> fifo_; + + // AudioBus wrapper for delievering data via AudioSourceCallback::OnData(). + scoped_ptr<AudioBus> audio_wrapper_; // Used to defer Start() to workaround http://crbug.com/160920. base::CancelableClosure deferred_start_cb_; diff --git a/media/base/audio_block_fifo.cc b/media/base/audio_block_fifo.cc index 5e8c9b3..9634574 100644 --- a/media/base/audio_block_fifo.cc +++ b/media/base/audio_block_fifo.cc @@ -22,7 +22,8 @@ AudioBlockFifo::AudioBlockFifo(int channels, int frames, int blocks) } } -AudioBlockFifo::~AudioBlockFifo() {} +AudioBlockFifo::~AudioBlockFifo() { +} void AudioBlockFifo::Push(const void* source, int frames, @@ -46,20 +47,39 @@ void AudioBlockFifo::Push(const void* source, // Deinterleave the content to the FIFO and update the |write_pos_|. current_block->FromInterleavedPartial( source_ptr, write_pos_, push_frames, bytes_per_sample); - write_pos_ = (write_pos_ + push_frames) % block_frames_; - if (!write_pos_) { - // The current block is completely filled, increment |write_block_| and - // |available_blocks_|. - write_block_ = (write_block_ + 1) % audio_blocks_.size(); - ++available_blocks_; - } + UpdatePosition(push_frames); source_ptr += push_frames * bytes_per_sample * current_block->channels(); frames_to_push -= push_frames; DCHECK_GE(frames_to_push, 0); } } +void AudioBlockFifo::Push(const AudioBus* source) { + DCHECK(source); + DCHECK_LT(available_blocks_, static_cast<int>(audio_blocks_.size())); + + int source_start_frame = 0; + while (source_start_frame < source->frames()) { + // Get the current write block. 
+ AudioBus* current_block = audio_blocks_[write_block_]; + DCHECK_EQ(source->channels(), current_block->channels()); + + // Figure out what segment sizes we need when adding the new content to + // the FIFO. + const int push_frames = std::min(block_frames_ - write_pos_, + source->frames() - source_start_frame); + + // Copy the data to FIFO. + source->CopyPartialFramesTo( + source_start_frame, push_frames, write_pos_, current_block); + + UpdatePosition(push_frames); + source_start_frame += push_frames; + DCHECK_LE(source_start_frame, source->frames()); + } +} + const AudioBus* AudioBlockFifo::Consume() { DCHECK(available_blocks_); AudioBus* audio_bus = audio_blocks_[read_block_]; @@ -86,4 +106,14 @@ int AudioBlockFifo::GetUnfilledFrames() const { return unfilled_frames; } +void AudioBlockFifo::UpdatePosition(int push_frames) { + write_pos_ = (write_pos_ + push_frames) % block_frames_; + if (!write_pos_) { + // The current block is completely filled, increment |write_block_| and + // |available_blocks_|. + write_block_ = (write_block_ + 1) % audio_blocks_.size(); + ++available_blocks_; + } +} + } // namespace media diff --git a/media/base/audio_block_fifo.h b/media/base/audio_block_fifo.h index fdb5cef..56a2fe0 100644 --- a/media/base/audio_block_fifo.h +++ b/media/base/audio_block_fifo.h @@ -28,6 +28,10 @@ class MEDIA_EXPORT AudioBlockFifo { // Push() will crash if the allocated space is insufficient. void Push(const void* source, int frames, int bytes_per_sample); + // Pushes the audio data from |source| to the FIFO. + // Push() will crash if the allocated space is insufficient. + void Push(const AudioBus* source); + // Consumes a block of audio from the FIFO. Returns an AudioBus which // contains the consumed audio data to avoid copying. // Consume() will crash if the FIFO does not contain a block of data. @@ -46,6 +50,9 @@ class MEDIA_EXPORT AudioBlockFifo { int GetUnfilledFrames() const; private: + // Helper method to update the indexes in Push methods. 
+ void UpdatePosition(int push_frames); + // The actual FIFO is a vector of audio buses. ScopedVector<AudioBus> audio_blocks_; diff --git a/media/base/audio_block_fifo_unittest.cc b/media/base/audio_block_fifo_unittest.cc index 8e8b5e0..f1ed228 100644 --- a/media/base/audio_block_fifo_unittest.cc +++ b/media/base/audio_block_fifo_unittest.cc @@ -8,29 +8,48 @@ namespace media { class AudioBlockFifoTest : public testing::Test { - public: + protected: AudioBlockFifoTest() {} virtual ~AudioBlockFifoTest() {} - void PushAndVerify(AudioBlockFifo* fifo, int frames_to_push, - int channels, int block_frames, int max_frames) { + private: + DISALLOW_COPY_AND_ASSIGN(AudioBlockFifoTest); +}; + +class AudioBlockFifoFormatTest : public AudioBlockFifoTest, + public testing::WithParamInterface<bool> { + protected: + void PushAndVerify(AudioBlockFifo* fifo, + int frames_to_push, + int channels, + int block_frames, + int max_frames) { const int bytes_per_sample = 2; const int data_byte_size = bytes_per_sample * channels * frames_to_push; - scoped_ptr<uint8[]> data(new uint8[data_byte_size]); - memset(data.get(), 0, data_byte_size); - - for (int filled_frames = max_frames - fifo->GetUnfilledFrames(); - filled_frames + frames_to_push <= max_frames;) { - fifo->Push(data.get(), frames_to_push, bytes_per_sample); - filled_frames += frames_to_push; - EXPECT_EQ(max_frames - filled_frames, fifo->GetUnfilledFrames()); - EXPECT_EQ(static_cast<int>(filled_frames / block_frames), - fifo->available_blocks()); + if (GetParam()) { + scoped_ptr<media::AudioBus> data = + AudioBus::Create(channels, frames_to_push); + for (int filled_frames = max_frames - fifo->GetUnfilledFrames(); + filled_frames + frames_to_push <= max_frames;) { + fifo->Push(data.get()); + filled_frames += frames_to_push; + EXPECT_EQ(max_frames - filled_frames, fifo->GetUnfilledFrames()); + EXPECT_EQ(static_cast<int>(filled_frames / block_frames), + fifo->available_blocks()); + } + } else { + scoped_ptr<uint8[]> data(new 
uint8[data_byte_size]); + memset(data.get(), 0, data_byte_size); + for (int filled_frames = max_frames - fifo->GetUnfilledFrames(); + filled_frames + frames_to_push <= max_frames;) { + fifo->Push(data.get(), frames_to_push, bytes_per_sample); + filled_frames += frames_to_push; + EXPECT_EQ(max_frames - filled_frames, fifo->GetUnfilledFrames()); + EXPECT_EQ(static_cast<int>(filled_frames / block_frames), + fifo->available_blocks()); + } } } - - private: - DISALLOW_COPY_AND_ASSIGN(AudioBlockFifoTest); }; // Verify that construction works as intended. @@ -44,7 +63,7 @@ TEST_F(AudioBlockFifoTest, Construct) { } // Pushes audio bus objects to/from a FIFO up to different degrees. -TEST_F(AudioBlockFifoTest, Push) { +TEST_P(AudioBlockFifoFormatTest, Push) { const int channels = 2; const int frames = 128; const int blocks = 2; @@ -65,7 +84,7 @@ TEST_F(AudioBlockFifoTest, Push) { // Perform a sequence of Push/Consume calls to different degrees, and verify // things are correct. -TEST_F(AudioBlockFifoTest, PushAndConsume) { +TEST_P(AudioBlockFifoFormatTest, PushAndConsume) { const int channels = 2; const int frames = 441; const int blocks = 4; @@ -100,10 +119,9 @@ TEST_F(AudioBlockFifoTest, PushAndConsume) { fifo.Clear(); int new_push_frames = 128; // Change the input frame and try to fill up the FIFO. - PushAndVerify(&fifo, new_push_frames, channels, frames, - frames * blocks); + PushAndVerify(&fifo, new_push_frames, channels, frames, frames * blocks); EXPECT_TRUE(fifo.GetUnfilledFrames() != 0); - EXPECT_TRUE(fifo.available_blocks() == blocks -1); + EXPECT_TRUE(fifo.available_blocks() == blocks - 1); // Consume all the existing filled blocks of data. while (fifo.available_blocks()) { @@ -122,14 +140,13 @@ TEST_F(AudioBlockFifoTest, PushAndConsume) { // Completely fill up the buffer again. 
new_push_frames = frames * blocks - remain_frames; - PushAndVerify(&fifo, new_push_frames, channels, frames, - frames * blocks); + PushAndVerify(&fifo, new_push_frames, channels, frames, frames * blocks); EXPECT_TRUE(fifo.GetUnfilledFrames() == 0); EXPECT_TRUE(fifo.available_blocks() == blocks); } // Perform a sequence of Push/Consume calls to a 1 block FIFO. -TEST_F(AudioBlockFifoTest, PushAndConsumeOneBlockFifo) { +TEST_P(AudioBlockFifoFormatTest, PushAndConsumeOneBlockFifo) { static const int channels = 2; static const int frames = 441; static const int blocks = 1; @@ -146,4 +163,8 @@ TEST_F(AudioBlockFifoTest, PushAndConsumeOneBlockFifo) { EXPECT_TRUE(fifo.GetUnfilledFrames() == frames); } +INSTANTIATE_TEST_CASE_P(AudioBlockFifoTests, + AudioBlockFifoFormatTest, + ::testing::Values(false, true)); + } // namespace media |