author     vrk@google.com <vrk@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2012-01-10 19:14:03 +0000
committer  vrk@google.com <vrk@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2012-01-10 19:14:03 +0000
commit     a164c1c728da3900206957e5362868d3b5f46f20 (patch)
tree       5308fe5fff9db236a08efa6cc785f9647f375ef1
parent     db89440b21bf4950a18f4479aaa961cfe13de035 (diff)
Clean up AudioRendererAlgorithmBase
The logic in AudioRendererAlgorithmBase is pretty convoluted and confusing.
This CL attempts to clean up code while preserving the existing behavior.
BUG=106492
TEST=media_unittests
Review URL: http://codereview.chromium.org/8873049
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@117072 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--   media/audio/audio_util.cc                                |  49
-rw-r--r--   media/audio/audio_util.h                                 |   9
-rw-r--r--   media/filters/audio_renderer_algorithm_base.cc           | 402
-rw-r--r--   media/filters/audio_renderer_algorithm_base.h            | 119
-rw-r--r--   media/filters/audio_renderer_algorithm_base_unittest.cc  |  28
-rw-r--r--   media/filters/audio_renderer_base_unittest.cc            |   4
6 files changed, 295 insertions(+), 316 deletions(-)
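
For context before the patch itself: the new media::Crossfade()/DoCrossfade() helpers added to audio_util.cc below perform a plain linear blend from the outgoing window into the incoming one. A minimal standalone sketch of that idea, independent of the Chromium tree (the frame count, channel count, and sample values here are made up for illustration and are not part of the patch):

// Linear crossfade over interleaved int16 stereo frames -- the same blend
// DoCrossfade<Type>() applies, stripped of the Chromium types and DCHECKs.
#include <cstdint>
#include <cstdio>

int main() {
  const int kFrames = 4;    // frames (samples per channel) to crossfade
  const int kChannels = 2;  // interleaved stereo
  // Outgoing window fades out; incoming window (all zeros here) fades in.
  int16_t outro[kFrames * kChannels] = {1000, 1000, 1000, 1000,
                                        1000, 1000, 1000, 1000};
  const int16_t intro[kFrames * kChannels] = {0};

  for (int i = 0; i < kFrames; ++i) {
    // Ratio ramps from 0.0 up to (kFrames - 1) / kFrames across the window.
    double ratio = static_cast<double>(i) / kFrames;
    for (int ch = 0; ch < kChannels; ++ch) {
      int idx = i * kChannels + ch;
      outro[idx] = static_cast<int16_t>(outro[idx] * (1.0 - ratio) +
                                        intro[idx] * ratio);
    }
  }

  // Prints: 1000 1000 750 750 500 500 250 250
  for (int i = 0; i < kFrames * kChannels; ++i)
    printf("%d ", outro[i]);
  printf("\n");
  return 0;
}
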
diff --git a/media/audio/audio_util.cc b/media/audio/audio_util.cc
index 9e1c818..8989eb9 100644
--- a/media/audio/audio_util.cc
+++ b/media/audio/audio_util.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -49,6 +49,30 @@ static void AdjustVolume(Format* buf_out,
   }
 }
 
+// Type is the datatype of a data point in the waveform (i.e. uint8, int16,
+// int32, etc).
+template <class Type>
+static void DoCrossfade(int bytes_to_crossfade, int number_of_channels,
+                        int bytes_per_channel, const Type* src, Type* dest) {
+  DCHECK_EQ(sizeof(Type), static_cast<size_t>(bytes_per_channel));
+  int number_of_samples =
+      bytes_to_crossfade / (bytes_per_channel * number_of_channels);
+
+  const Type* dest_end = dest + number_of_samples * number_of_channels;
+  const Type* src_end = src + number_of_samples * number_of_channels;
+
+  for (int i = 0; i < number_of_samples; ++i) {
+    double crossfade_ratio = static_cast<double>(i) / number_of_samples;
+    for (int j = 0; j < number_of_channels; ++j) {
+      DCHECK_LT(dest, dest_end);
+      DCHECK_LT(src, src_end);
+      *dest = (*dest) * (1.0 - crossfade_ratio) + (*src) * crossfade_ratio;
+      ++src;
+      ++dest;
+    }
+  }
+}
+
 static const int kChannel_L = 0;
 static const int kChannel_R = 1;
 static const int kChannel_C = 2;
@@ -373,4 +397,27 @@ bool IsWASAPISupported() {
 
 #endif
 
+void Crossfade(int bytes_to_crossfade, int number_of_channels,
+               int bytes_per_channel, const uint8* src, uint8* dest) {
+  // TODO(vrk): The type punning below is no good!
+  switch (bytes_per_channel) {
+    case 4:
+      DoCrossfade(bytes_to_crossfade, number_of_channels, bytes_per_channel,
+                  reinterpret_cast<const int32*>(src),
+                  reinterpret_cast<int32*>(dest));
+      break;
+    case 2:
+      DoCrossfade(bytes_to_crossfade, number_of_channels, bytes_per_channel,
+                  reinterpret_cast<const int16*>(src),
+                  reinterpret_cast<int16*>(dest));
+      break;
+    case 1:
+      DoCrossfade(bytes_to_crossfade, number_of_channels, bytes_per_channel,
+                  src, dest);
+      break;
+    default:
+      NOTREACHED() << "Unsupported audio bit depth in crossfade.";
+  }
+}
+
 } // namespace media
diff --git a/media/audio/audio_util.h b/media/audio/audio_util.h
index 22aa19b..278779a 100644
--- a/media/audio/audio_util.h
+++ b/media/audio/audio_util.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -113,6 +113,13 @@ MEDIA_EXPORT bool IsWASAPISupported();
 
 #endif  // defined(OS_WIN)
 
+// Crossfades |bytes_to_crossfade| bytes of data in |dest| with the
+// data in |src|. Assumes there is room in |dest| and enough data in |src|.
+MEDIA_EXPORT void Crossfade(int bytes_to_crossfade, int number_of_channels,
+                            int bytes_per_channel, const uint8* src,
+                            uint8* dest);
+
+
 } // namespace media
 
 #endif  // MEDIA_AUDIO_AUDIO_UTIL_H_
diff --git a/media/filters/audio_renderer_algorithm_base.cc b/media/filters/audio_renderer_algorithm_base.cc
index 25b6a29..8bc742d 100644
--- a/media/filters/audio_renderer_algorithm_base.cc
+++ b/media/filters/audio_renderer_algorithm_base.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -8,306 +8,238 @@
 #include <cmath>
 
 #include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_util.h"
 #include "media/base/buffers.h"
 
 namespace media {
 
-// The size in bytes we try to maintain for the |queue_|. Previous usage
-// maintained a deque of 16 Buffers, each of size 4Kb. This worked well, so we
-// maintain this number of bytes (16 * 4096).
-static const int kDefaultMinQueueSizeInBytes = 65536;
-// ~64kb @ 44.1k stereo.
-static const int kDefaultMinQueueSizeInMilliseconds = 372;
-// 3 seconds @ 96kHz 7.1.
-static const int kDefaultMaxQueueSizeInBytes = 4608000;
-static const int kDefaultMaxQueueSizeInMilliseconds = 3000;
-
-// Default window and crossfade lengths in seconds.
-static const double kDefaultWindowLength = 0.08;
-static const double kDefaultCrossfadeLength = 0.008;
-
-// Default mute ranges for fast/slow audio. These rates would sound better
-// under a frequency domain algorithm.
-static const float kMinRate = 0.5f;
-static const float kMaxRate = 4.0f;
+// The starting size in bytes for |audio_buffer_|.
+// Previous usage maintained a deque of 16 Buffers, each of size 4Kb. This
+// worked well, so we maintain this number of bytes (16 * 4096).
+static const size_t kStartingBufferSizeInBytes = 65536;
+
+// The maximum size in bytes for the |audio_buffer_|. Arbitrarily determined.
+// This number represents 3 seconds of 96kHz/16 bit 7.1 surround sound.
+static const size_t kMaxBufferSizeInBytes = 4608000;
+
+// Duration of audio segments used for crossfading (in seconds).
+static const double kWindowDuration = 0.08;
+
+// Duration of crossfade between audio segments (in seconds).
+static const double kCrossfadeDuration = 0.008;
+
+// Max/min supported playback rates for fast/slow audio. Audio outside of these
+// ranges are muted.
+// Audio at these speeds would sound better under a frequency domain algorithm.
+static const float kMinPlaybackRate = 0.5f;
+static const float kMaxPlaybackRate = 4.0f;
 
 AudioRendererAlgorithmBase::AudioRendererAlgorithmBase()
     : channels_(0),
-      sample_rate_(0),
-      sample_bytes_(0),
+      samples_per_second_(0),
+      bytes_per_channel_(0),
       playback_rate_(0.0f),
-      queue_(0, kDefaultMinQueueSizeInBytes),
-      max_queue_capacity_(kDefaultMaxQueueSizeInBytes),
-      input_step_(0),
-      output_step_(0),
+      audio_buffer_(0, kStartingBufferSizeInBytes),
       crossfade_size_(0),
      window_size_(0) {
 }
 
-uint32 AudioRendererAlgorithmBase::FillBuffer(uint8* dest, uint32 length) {
-  if (IsQueueEmpty())
-    return 0;
-  if (playback_rate_ == 0.0f)
-    return 0;
+AudioRendererAlgorithmBase::~AudioRendererAlgorithmBase() {}
 
-  uint32 dest_written = 0;
+void AudioRendererAlgorithmBase::Initialize(
+    int channels,
+    int samples_per_second,
+    int bits_per_channel,
+    float initial_playback_rate,
+    const base::Closure& callback) {
+  DCHECK_GT(channels, 0);
+  DCHECK_LE(channels, 8) << "We only support <= 8 channel audio.";
+  DCHECK_GT(samples_per_second, 0);
+  DCHECK_LE(samples_per_second, 256000)
+      << "We only support sample rates at or below 256000Hz.";
+  DCHECK_GT(bits_per_channel, 0);
+  DCHECK_LE(bits_per_channel, 32) << "We only support 8, 16, 32 bit audio.";
+  DCHECK_EQ(bits_per_channel % 8, 0) << "We only support 8, 16, 32 bit audio.";
+  DCHECK(!callback.is_null());
 
-  // Handle the simple case of normal playback.
-  if (playback_rate_ == 1.0f) {
-    if (QueueSize() < length)
-      dest_written = CopyFromInput(dest, QueueSize());
-    else
-      dest_written = CopyFromInput(dest, length);
-    AdvanceInputPosition(dest_written);
-    return dest_written;
-  }
+  channels_ = channels;
+  samples_per_second_ = samples_per_second;
+  bytes_per_channel_ = bits_per_channel / 8;
+  request_read_cb_ = callback;
+  SetPlaybackRate(initial_playback_rate);
 
-  // For other playback rates, OLA with crossfade!
-  while (true) {
-    // Mute when out of acceptable quality range or when we don't have enough
-    // data to completely finish this loop.
-    //
-    // Note: This may not play at the speed requested as we can only consume as
-    // much data as we have, and audio timestamps drive the pipeline clock.
-    //
-    // Furthermore, we won't end up scaling the very last bit of audio, but
-    // we're talking about <8ms of audio data.
-    if (playback_rate_ < kMinRate || playback_rate_ > kMaxRate ||
-        QueueSize() < window_size_) {
-      // Calculate the ideal input/output steps based on the size of the
-      // destination buffer.
-      uint32 input_step = static_cast<uint32>(ceil(
-          static_cast<float>(length * playback_rate_)));
-      uint32 output_step = length;
-
-      // If the ideal size is too big, recalculate based on how much is left in
-      // the queue.
-      if (input_step > QueueSize()) {
-        input_step = QueueSize();
-        output_step = static_cast<uint32>(ceil(
-            static_cast<float>(input_step / playback_rate_)));
-      }
-
-      // Stay aligned and sanity check before writing out zeros.
-      AlignToSampleBoundary(&input_step);
-      AlignToSampleBoundary(&output_step);
-      DCHECK_LE(output_step, length);
-      if (output_step > length) {
-        LOG(ERROR) << "OLA: output_step (" << output_step << ") calculated to "
-                   << "be larger than destination length (" << length << ")";
-        output_step = length;
-      }
-
-      memset(dest, 0, output_step);
-      AdvanceInputPosition(input_step);
-      dest_written += output_step;
-      break;
-    }
+  window_size_ =
+      samples_per_second_ * bytes_per_channel_ * channels_ * kWindowDuration;
+  AlignToSampleBoundary(&window_size_);
 
-    // Break if we don't have enough room left in our buffer to do a full
-    // OLA iteration.
-    if (length < (output_step_ + crossfade_size_)) {
-      break;
-    }
+  crossfade_size_ =
+      samples_per_second_ * bytes_per_channel_ * channels_ * kCrossfadeDuration;
+  AlignToSampleBoundary(&crossfade_size_);
+}
 
-    // Copy bulk of data to output (including some to crossfade to the next
-    // copy), then add to our running sum of written data and subtract from
-    // our tally of remaining requested.
-    uint32 copied = CopyFromInput(dest, output_step_ + crossfade_size_);
-    dest_written += copied;
-    length -= copied;
-
-    // Advance pointers for crossfade.
-    dest += output_step_;
-    AdvanceInputPosition(input_step_);
-
-    // Prepare intermediate buffer.
-    uint32 crossfade_size;
-    scoped_array<uint8> src(new uint8[crossfade_size_]);
-    crossfade_size = CopyFromInput(src.get(), crossfade_size_);
-
-    // Calculate number of samples to crossfade, then do so.
-    int samples = static_cast<int>(crossfade_size / sample_bytes_ / channels_);
-    switch (sample_bytes_) {
-      case 4:
-        Crossfade(samples,
-                  reinterpret_cast<const int32*>(src.get()),
-                  reinterpret_cast<int32*>(dest));
-        break;
-      case 2:
-        Crossfade(samples,
-                  reinterpret_cast<const int16*>(src.get()),
-                  reinterpret_cast<int16*>(dest));
-        break;
-      case 1:
-        Crossfade(samples, src.get(), dest);
-        break;
-      default:
-        NOTREACHED() << "Unsupported audio bit depth sent to OLA algorithm";
-    }
+uint32 AudioRendererAlgorithmBase::FillBuffer(uint8* dest, uint32 length) {
+  if (IsQueueEmpty() || playback_rate_ == 0.0f)
+    return 0;
 
-    // Advance pointers again.
-    AdvanceInputPosition(crossfade_size);
-    dest += crossfade_size_;
+  // Handle the simple case of normal playback.
+  if (playback_rate_ == 1.0f) {
+    uint32 bytes_written =
+        CopyFromAudioBuffer(dest, std::min(length, bytes_buffered()));
+    AdvanceBufferPosition(bytes_written);
+    return bytes_written;
   }
-  return dest_written;
-}
 
-void AudioRendererAlgorithmBase::SetPlaybackRate(float new_rate) {
-  DCHECK_GE(new_rate, 0.0);
-  playback_rate_ = new_rate;
+  // Output muted data when out of acceptable quality range.
+  if (playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate)
+    return MuteBuffer(dest, length);
 
-  // Calculate the window size from our default length and our audio properties.
-  // Precision is not an issue because we will round this to a sample boundary.
-  // This will not overflow because each parameter is checked in Initialize().
-  window_size_ = static_cast<uint32>(
-      sample_rate_ * sample_bytes_ * channels_ * kDefaultWindowLength);
+  uint32 input_step = window_size_;
+  uint32 output_step = window_size_;
 
-  // Adjusting step sizes to accommodate requested playback rate.
   if (playback_rate_ > 1.0f) {
-    input_step_ = window_size_;
-    output_step_ = static_cast<uint32>(ceil(
-        static_cast<float>(window_size_ / playback_rate_)));
+    // Playback is faster than normal; need to squish output!
+    output_step = ceil(window_size_ / playback_rate_);
   } else {
-    input_step_ = static_cast<uint32>(ceil(
-        static_cast<float>(window_size_ * playback_rate_)));
-    output_step_ = window_size_;
+    // Playback is slower than normal; need to stretch input!
+    input_step = ceil(window_size_ * playback_rate_);
   }
-  AlignToSampleBoundary(&input_step_);
-  AlignToSampleBoundary(&output_step_);
-  // Calculate length for crossfading.
-  crossfade_size_ = static_cast<uint32>(
-      sample_rate_ * sample_bytes_ * channels_ * kDefaultCrossfadeLength);
-  AlignToSampleBoundary(&crossfade_size_);
-  if (crossfade_size_ > std::min(input_step_, output_step_)) {
-    crossfade_size_ = 0;
-    return;
+  AlignToSampleBoundary(&input_step);
+  AlignToSampleBoundary(&output_step);
+  DCHECK_LE(crossfade_size_, input_step);
+  DCHECK_LE(crossfade_size_, output_step);
+
+  uint32 bytes_written = 0;
+  uint32 bytes_left_to_output = length;
+  uint8* output_ptr = dest;
+
+  // TODO(vrk): The while loop and if test below are lame! We are requiring the
+  // client to provide us with enough data to output only complete crossfaded
+  // windows. Instead, we should output as much data as we can, and add state to
+  // keep track of what point in the crossfade we are at.
+  // This is also the cause of crbug.com/108239.
+  while (bytes_left_to_output >= output_step) {
+    // If there is not enough data buffered to complete an iteration of the
+    // loop, mute the remaining and break.
+    if (bytes_buffered() < window_size_) {
+      bytes_written += MuteBuffer(output_ptr, bytes_left_to_output);
+      break;
+    }
+
+    // Copy |output_step| bytes into destination buffer.
+    uint32 copied = CopyFromAudioBuffer(output_ptr, output_step);
+    DCHECK_EQ(copied, output_step);
+    output_ptr += output_step;
+    bytes_written += copied;
+    bytes_left_to_output -= copied;
+
+    // Copy the |crossfade_size_| bytes leading up to the next window that will
+    // be played into an intermediate buffer. This will be used to crossfade
+    // from the current window to the next.
+    AdvanceBufferPosition(input_step - crossfade_size_);
+    scoped_array<uint8> next_window_intro(new uint8[crossfade_size_]);
+    uint32 bytes_copied =
+        CopyFromAudioBuffer(next_window_intro.get(), crossfade_size_);
+    DCHECK_EQ(bytes_copied, crossfade_size_);
+    AdvanceBufferPosition(crossfade_size_);
+
+    // Prepare pointers to end of the current window and the start of the next
+    // window.
+    uint8* start_of_outro = output_ptr - crossfade_size_;
+    const uint8* start_of_intro = next_window_intro.get();
+
+    // Do crossfade!
+    Crossfade(crossfade_size_, channels_, bytes_per_channel_,
+              start_of_intro, start_of_outro);
   }
 
-  // To keep true to playback rate, modify the steps.
-  input_step_ -= crossfade_size_;
-  output_step_ -= crossfade_size_;
-}
+  return bytes_written;
+}
+
+uint32 AudioRendererAlgorithmBase::MuteBuffer(uint8* dest, uint32 length) {
+  DCHECK_NE(playback_rate_, 0.0);
+  // Note: This may not play at the speed requested as we can only consume as
+  // much data as we have, and audio timestamps drive the pipeline clock.
+  //
+  // Furthermore, we won't end up scaling the very last bit of audio, but
+  // we're talking about <8ms of audio data.
+
+  // Cap the |input_step| by the amount of bytes buffered.
+  uint32 input_step =
+      std::min(static_cast<uint32>(length * playback_rate_), bytes_buffered());
+  uint32 output_step = input_step / playback_rate_;
+  AlignToSampleBoundary(&input_step);
+  AlignToSampleBoundary(&output_step);
+
+  DCHECK_LE(output_step, length);
+  if (output_step > length) {
+    LOG(ERROR) << "OLA: output_step (" << output_step << ") calculated to "
+               << "be larger than destination length (" << length << ")";
+    output_step = length;
+  }
 
-void AudioRendererAlgorithmBase::AlignToSampleBoundary(uint32* value) {
-  (*value) -= ((*value) % (channels_ * sample_bytes_));
+  memset(dest, 0, output_step);
+  AdvanceBufferPosition(input_step);
+  return output_step;
 }
 
-template <class Type>
-void AudioRendererAlgorithmBase::Crossfade(int samples, const Type* src,
-                                           Type* dest) {
-  Type* dest_end = dest + samples * channels_;
-  const Type* src_end = src + samples * channels_;
-  for (int i = 0; i < samples; ++i) {
-    double x_ratio = static_cast<double>(i) / static_cast<double>(samples);
-    for (int j = 0; j < channels_; ++j) {
-      DCHECK(dest < dest_end);
-      DCHECK(src < src_end);
-      (*dest) = static_cast<Type>((*dest) * (1.0 - x_ratio) +
-                                  (*src) * x_ratio);
-      ++src;
-      ++dest;
-    }
-  }
+void AudioRendererAlgorithmBase::SetPlaybackRate(float new_rate) {
+  DCHECK_GE(new_rate, 0.0);
+  playback_rate_ = new_rate;
 }
 
-AudioRendererAlgorithmBase::~AudioRendererAlgorithmBase() {}
-
-void AudioRendererAlgorithmBase::Initialize(
-    int channels,
-    int sample_rate,
-    int sample_bits,
-    float initial_playback_rate,
-    const base::Closure& callback) {
-  DCHECK_GT(channels, 0);
-  DCHECK_LE(channels, 8) << "We only support <= 8 channel audio.";
-  DCHECK_GT(sample_rate, 0);
-  DCHECK_LE(sample_rate, 256000)
-      << "We only support sample rates at or below 256000Hz.";
-  DCHECK_GT(sample_bits, 0);
-  DCHECK_LE(sample_bits, 32) << "We only support 8, 16, 32 bit audio.";
-  DCHECK_EQ(sample_bits % 8, 0) << "We only support 8, 16, 32 bit audio.";
-  DCHECK(!callback.is_null());
-
-  channels_ = channels;
-  sample_rate_ = sample_rate;
-  sample_bytes_ = sample_bits / 8;
-
-  // Update the capacity based on time now that we have the audio format
-  // parameters.
-  queue_.set_forward_capacity(
-      DurationToBytes(kDefaultMinQueueSizeInMilliseconds));
-  max_queue_capacity_ =
-      std::min(kDefaultMaxQueueSizeInBytes,
-               DurationToBytes(kDefaultMaxQueueSizeInMilliseconds));
-
-  request_read_callback_ = callback;
-
-  SetPlaybackRate(initial_playback_rate);
+void AudioRendererAlgorithmBase::AlignToSampleBoundary(uint32* value) {
+  (*value) -= ((*value) % (channels_ * bytes_per_channel_));
 }
 
 void AudioRendererAlgorithmBase::FlushBuffers() {
   // Clear the queue of decoded packets (releasing the buffers).
-  queue_.Clear();
-  request_read_callback_.Run();
+  audio_buffer_.Clear();
+  request_read_cb_.Run();
 }
 
 base::TimeDelta AudioRendererAlgorithmBase::GetTime() {
-  return queue_.current_time();
+  return audio_buffer_.current_time();
 }
 
 void AudioRendererAlgorithmBase::EnqueueBuffer(Buffer* buffer_in) {
   // If we're at end of stream, |buffer_in| contains no data.
   if (!buffer_in->IsEndOfStream())
-    queue_.Append(buffer_in);
+    audio_buffer_.Append(buffer_in);
 
   // If we still don't have enough data, request more.
   if (!IsQueueFull())
-    request_read_callback_.Run();
-}
-
-float AudioRendererAlgorithmBase::playback_rate() {
-  return playback_rate_;
+    request_read_cb_.Run();
 }
 
 bool AudioRendererAlgorithmBase::IsQueueEmpty() {
-  return queue_.forward_bytes() == 0;
+  return audio_buffer_.forward_bytes() == 0;
 }
 
 bool AudioRendererAlgorithmBase::IsQueueFull() {
-  return (queue_.forward_bytes() >= queue_.forward_capacity());
-}
-
-uint32 AudioRendererAlgorithmBase::QueueSize() {
-  return queue_.forward_bytes();
+  return audio_buffer_.forward_bytes() >= audio_buffer_.forward_capacity();
 }
 
 uint32 AudioRendererAlgorithmBase::QueueCapacity() {
-  return queue_.forward_capacity();
+  return audio_buffer_.forward_capacity();
 }
 
 void AudioRendererAlgorithmBase::IncreaseQueueCapacity() {
-  queue_.set_forward_capacity(
-      std::min(2 * queue_.forward_capacity(), max_queue_capacity_));
-}
-
-int AudioRendererAlgorithmBase::DurationToBytes(
-    int duration_in_milliseconds) const {
-  int64 bytes_per_second = sample_bytes_ * channels_ * sample_rate_;
-  int64 bytes = duration_in_milliseconds * bytes_per_second / 1000;
-  return std::min(bytes, static_cast<int64>(kint32max));
+  audio_buffer_.set_forward_capacity(
+      std::min(2 * audio_buffer_.forward_capacity(), kMaxBufferSizeInBytes));
 }
 
-void AudioRendererAlgorithmBase::AdvanceInputPosition(uint32 bytes) {
-  queue_.Seek(bytes);
+void AudioRendererAlgorithmBase::AdvanceBufferPosition(uint32 bytes) {
+  audio_buffer_.Seek(bytes);
 
   if (!IsQueueFull())
-    request_read_callback_.Run();
+    request_read_cb_.Run();
 }
 
-uint32 AudioRendererAlgorithmBase::CopyFromInput(uint8* dest, uint32 bytes) {
-  return queue_.Peek(dest, bytes);
+uint32 AudioRendererAlgorithmBase::CopyFromAudioBuffer(
+    uint8* dest, uint32 bytes) {
+  return audio_buffer_.Peek(dest, bytes);
 }
 
 } // namespace media
diff --git a/media/filters/audio_renderer_algorithm_base.h b/media/filters/audio_renderer_algorithm_base.h
index 33f5c31..2b134aa 100644
--- a/media/filters/audio_renderer_algorithm_base.h
+++ b/media/filters/audio_renderer_algorithm_base.h
@@ -22,12 +22,8 @@
 #ifndef MEDIA_FILTERS_AUDIO_RENDERER_ALGORITHM_BASE_H_
 #define MEDIA_FILTERS_AUDIO_RENDERER_ALGORITHM_BASE_H_
 
-#include <deque>
-
 #include "base/callback.h"
 #include "base/gtest_prod_util.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "media/base/seekable_buffer.h"
 
 namespace media {
 
@@ -39,16 +35,34 @@ class MEDIA_EXPORT AudioRendererAlgorithmBase {
   AudioRendererAlgorithmBase();
   ~AudioRendererAlgorithmBase();
 
-  // Checks validity of audio parameters.
-  void Initialize(
-      int channels, int sample_rate, int sample_bits,
-      float initial_playback_rate, const base::Closure& callback);
-
-  // Tries to fill |length| bytes of |dest| with possibly scaled data from
-  // our |queue_|. Returns the number of bytes copied into |dest|.
+  // Initializes this object with information about the audio stream.
+  // |samples_per_second| is in Hz. |read_request_callback| is called to
+  // request more data from the client, requests that are fulfilled through
+  // calls to EnqueueBuffer().
+  void Initialize(int channels,
+                  int samples_per_second,
+                  int bits_per_channel,
+                  float initial_playback_rate,
+                  const base::Closure& request_read_cb);
+
+  // Tries to fill |length| bytes of |dest| with possibly scaled data from our
+  // |audio_buffer_|. Data is scaled based on the playback rate, using a
+  // variation of the Overlap-Add method to combine sample windows.
+  //
+  // Data from |audio_buffer_| is consumed in proportion to the playback rate.
+  // FillBuffer() will fit |playback_rate_| * |length| bytes of raw data from
+  // |audio_buffer| into |length| bytes of output data in |dest| by chopping up
+  // the buffered data into windows and crossfading from one window to the next.
+  // For speeds greater than 1.0f, FillBuffer() "squish" the windows, dropping
+  // some data in between windows to meet the sped-up playback. For speeds less
+  // than 1.0f, FillBuffer() will "stretch" the window by copying and
+  // overlapping data at the window boundaries, crossfading in between.
+  //
+  // Returns the number of bytes copied into |dest|.
+  // May request more reads via |request_read_cb_| before returning.
   uint32 FillBuffer(uint8* dest, uint32 length);
 
-  // Clears |queue_|.
+  // Clears |audio_buffer_|.
   void FlushBuffers();
 
   // Returns the time of the next byte in our data or kNoTimestamp if current
@@ -59,80 +73,59 @@ class MEDIA_EXPORT AudioRendererAlgorithmBase {
   // read completes.
   void EnqueueBuffer(Buffer* buffer_in);
 
-  // Getter/setter for |playback_rate_|.
-  float playback_rate();
+  float playback_rate() const { return playback_rate_; }
   void SetPlaybackRate(float new_rate);
 
-  // Returns whether |queue_| is empty.
+  // Returns whether |audio_buffer_| is empty.
   bool IsQueueEmpty();
 
-  // Returns true if we have enough data
+  // Returns true if |audio_buffer_| is at or exceeds capacity.
   bool IsQueueFull();
 
-  // Returns the number of bytes left in |queue_|, which may be larger than
-  // QueueCapacity() in the event that a read callback delivered more data than
-  // |queue_| was intending to hold.
-  uint32 QueueSize();
-
-  // Returns the capacity of |queue_|.
+  // Returns the capacity of |audio_buffer_|.
   uint32 QueueCapacity();
 
-  // Increase the capacity of |queue_| if possible.
+  // Increase the capacity of |audio_buffer_| if possible.
   void IncreaseQueueCapacity();
 
+  // Returns the number of bytes left in |audio_buffer_|, which may be larger
+  // than QueueCapacity() in the event that a read callback delivered more data
+  // than |audio_buffer_| was intending to hold.
+  uint32 bytes_buffered() { return audio_buffer_.forward_bytes(); }
+
+  uint32 window_size() { return window_size_; }
+
  private:
-  // Advances |queue_|'s internal pointer by |bytes|.
-  void AdvanceInputPosition(uint32 bytes);
-
-  // Tries to copy |bytes| bytes from |queue_| to |dest|. Returns the number of
-  // bytes successfully copied.
-  uint32 CopyFromInput(uint8* dest, uint32 bytes);
-
-  // Converts a duration in milliseconds to a byte count based on
-  // the current sample rate, channel count, and bytes per sample.
-  int DurationToBytes(int duration_in_milliseconds) const;
-
-  FRIEND_TEST_ALL_PREFIXES(AudioRendererAlgorithmBaseTest,
-                           FillBuffer_NormalRate);
-  FRIEND_TEST_ALL_PREFIXES(AudioRendererAlgorithmBaseTest,
-                           FillBuffer_DoubleRate);
-  FRIEND_TEST_ALL_PREFIXES(AudioRendererAlgorithmBaseTest,
-                           FillBuffer_HalfRate);
-  FRIEND_TEST_ALL_PREFIXES(AudioRendererAlgorithmBaseTest,
-                           FillBuffer_QuarterRate);
+  // Advances |audio_buffer_|'s internal pointer by |bytes|.
+  void AdvanceBufferPosition(uint32 bytes);
+
+  // Tries to copy |bytes| bytes from |audio_buffer_| to |dest|.
+  // Returns the number of bytes successfully copied.
+  uint32 CopyFromAudioBuffer(uint8* dest, uint32 bytes);
 
   // Aligns |value| to a channel and sample boundary.
   void AlignToSampleBoundary(uint32* value);
 
-  // Crossfades |samples| samples of |dest| with the data in |src|. Assumes
-  // there is room in |dest| and enough data in |src|. Type is the datatype
-  // of a data point in the waveform (i.e. uint8, int16, int32, etc). Also,
-  // sizeof(one sample) == sizeof(Type) * channels.
-  template <class Type>
-  void Crossfade(int samples, const Type* src, Type* dest);
+  // Attempts to write |length| bytes of muted audio into |dest|.
+  uint32 MuteBuffer(uint8* dest, uint32 length);
 
-  // Audio properties.
+  // Number of channels in audio stream.
   int channels_;
-  int sample_rate_;
-  int sample_bytes_;
+
+  // Sample rate of audio stream.
+  int samples_per_second_;
+
+  // Byte depth of audio.
+  int bytes_per_channel_;
 
   // Used by algorithm to scale output.
   float playback_rate_;
 
   // Used to request more data.
-  base::Closure request_read_callback_;
-
-  // Queued audio data.
-  SeekableBuffer queue_;
-
-  // Largest capacity queue_ can grow to.
-  size_t max_queue_capacity_;
+  base::Closure request_read_cb_;
 
-  // Members for ease of calculation in FillBuffer(). These members are based
-  // on |playback_rate_|, but are stored separately so they don't have to be
-  // recalculated on every call to FillBuffer().
-  uint32 input_step_;
-  uint32 output_step_;
+  // Buffered audio data.
+  SeekableBuffer audio_buffer_;
 
   // Length for crossfade in bytes.
   uint32 crossfade_size_;
diff --git a/media/filters/audio_renderer_algorithm_base_unittest.cc b/media/filters/audio_renderer_algorithm_base_unittest.cc
index 5f1db40..75d8c52 100644
--- a/media/filters/audio_renderer_algorithm_base_unittest.cc
+++ b/media/filters/audio_renderer_algorithm_base_unittest.cc
@@ -33,12 +33,12 @@ TEST(AudioRendererAlgorithmBaseTest, FillBuffer_NormalRate) {
   const size_t kDataSize = 1024;
   algorithm.EnqueueBuffer(new DataBuffer(
       scoped_array<uint8>(new uint8[kDataSize]), kDataSize));
-  EXPECT_EQ(kDataSize, algorithm.QueueSize());
+  EXPECT_EQ(kDataSize, algorithm.bytes_buffered());
 
   // Read the same sized amount.
   scoped_array<uint8> data(new uint8[kDataSize]);
   EXPECT_EQ(kDataSize, algorithm.FillBuffer(data.get(), kDataSize));
-  EXPECT_EQ(0u, algorithm.QueueSize());
+  EXPECT_EQ(0u, algorithm.bytes_buffered());
 }
 
 TEST(AudioRendererAlgorithmBaseTest, FillBuffer_DoubleRate) {
@@ -55,8 +55,8 @@ TEST(AudioRendererAlgorithmBaseTest, FillBuffer_DoubleRate) {
   const size_t kBufferSize = 16 * 1024;
   scoped_array<uint8> data(new uint8[kBufferSize]);
   const size_t kTestData[][2] = {
-    { algorithm.window_size_, algorithm.window_size_ / 2},
-    { algorithm.window_size_ / 2, algorithm.window_size_ / 4},
+    { algorithm.window_size(), algorithm.window_size() / 2},
+    { algorithm.window_size() / 2, algorithm.window_size() / 4},
     { 4u, 2u },
     { 0u, 0u },
   };
@@ -65,12 +65,12 @@ TEST(AudioRendererAlgorithmBaseTest, FillBuffer_DoubleRate) {
     const size_t kDataSize = kTestData[i][0];
     algorithm.EnqueueBuffer(new DataBuffer(
         scoped_array<uint8>(new uint8[kDataSize]), kDataSize));
-    EXPECT_EQ(kDataSize, algorithm.QueueSize());
+    EXPECT_EQ(kDataSize, algorithm.bytes_buffered());
 
     const size_t kExpectedSize = kTestData[i][1];
     ASSERT_LE(kExpectedSize, kBufferSize);
     EXPECT_EQ(kExpectedSize, algorithm.FillBuffer(data.get(), kBufferSize));
-    EXPECT_EQ(0u, algorithm.QueueSize());
+    EXPECT_EQ(0u, algorithm.bytes_buffered());
   }
 }
 
@@ -88,8 +88,8 @@ TEST(AudioRendererAlgorithmBaseTest, FillBuffer_HalfRate) {
   const size_t kBufferSize = 16 * 1024;
   scoped_array<uint8> data(new uint8[kBufferSize]);
   const size_t kTestData[][2] = {
-    { algorithm.window_size_, algorithm.window_size_ * 2 },
-    { algorithm.window_size_ / 2, algorithm.window_size_ },
+    { algorithm.window_size(), algorithm.window_size() * 2 },
+    { algorithm.window_size() / 2, algorithm.window_size() },
    { 2u, 4u },
     { 0u, 0u },
   };
@@ -98,12 +98,12 @@ TEST(AudioRendererAlgorithmBaseTest, FillBuffer_HalfRate) {
     const size_t kDataSize = kTestData[i][0];
     algorithm.EnqueueBuffer(new DataBuffer(
         scoped_array<uint8>(new uint8[kDataSize]), kDataSize));
-    EXPECT_EQ(kDataSize, algorithm.QueueSize());
+    EXPECT_EQ(kDataSize, algorithm.bytes_buffered());
 
     const size_t kExpectedSize = kTestData[i][1];
     ASSERT_LE(kExpectedSize, kBufferSize);
     EXPECT_EQ(kExpectedSize, algorithm.FillBuffer(data.get(), kBufferSize));
-    EXPECT_EQ(0u, algorithm.QueueSize());
+    EXPECT_EQ(0u, algorithm.bytes_buffered());
   }
 }
 
@@ -121,8 +121,8 @@ TEST(AudioRendererAlgorithmBaseTest, FillBuffer_QuarterRate) {
   const size_t kBufferSize = 16 * 1024;
   scoped_array<uint8> data(new uint8[kBufferSize]);
   const size_t kTestData[][2] = {
-    { algorithm.window_size_, algorithm.window_size_ * 4},
-    { algorithm.window_size_ / 2, algorithm.window_size_ * 2},
+    { algorithm.window_size(), algorithm.window_size() * 4},
+    { algorithm.window_size() / 2, algorithm.window_size() * 2},
     { 1u, 4u },
     { 0u, 0u },
   };
@@ -131,12 +131,12 @@ TEST(AudioRendererAlgorithmBaseTest, FillBuffer_QuarterRate) {
     const size_t kDataSize = kTestData[i][0];
     algorithm.EnqueueBuffer(new DataBuffer(scoped_array<uint8>(
         new uint8[kDataSize]), kDataSize));
-    EXPECT_EQ(kDataSize, algorithm.QueueSize());
+    EXPECT_EQ(kDataSize, algorithm.bytes_buffered());
 
     const size_t kExpectedSize = kTestData[i][1];
     ASSERT_LE(kExpectedSize, kBufferSize);
     EXPECT_EQ(kExpectedSize, algorithm.FillBuffer(data.get(), kBufferSize));
-    EXPECT_EQ(0u, algorithm.QueueSize());
+    EXPECT_EQ(0u, algorithm.bytes_buffered());
   }
 }
 
diff --git a/media/filters/audio_renderer_base_unittest.cc b/media/filters/audio_renderer_base_unittest.cc
index 2dd5cc8..93fe785 100644
--- a/media/filters/audio_renderer_base_unittest.cc
+++ b/media/filters/audio_renderer_base_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -156,7 +156,7 @@ class AudioRendererBaseTest : public ::testing::Test {
   }
 
   uint32 bytes_buffered() {
-    return renderer_->algorithm_->QueueSize();
+    return renderer_->algorithm_->bytes_buffered();
   }
 
   uint32 buffer_capacity() {
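
To see how the renamed API fits together end to end, here is a sketch of a client driving the algorithm, modeled on the unit tests above. It assumes a Chromium checkout at roughly this revision and is illustrative only: ReadRequested() and the parameter values are invented for the example, not part of the patch.

#include "base/basictypes.h"
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/data_buffer.h"
#include "media/filters/audio_renderer_algorithm_base.h"

namespace {

// Stand-in for the renderer's "request more data" callback; a real client
// would kick off another decoder read here.
void ReadRequested() {}

}  // namespace

void ExampleFill() {
  media::AudioRendererAlgorithmBase algorithm;
  // 2 channels, 44.1kHz, 16 bits per channel, 2x playback speed.
  algorithm.Initialize(2, 44100, 16, 2.0f, base::Bind(&ReadRequested));

  // Hand the algorithm some decoded audio (the buffer is ref-counted)...
  const size_t kDataSize = 32 * 1024;
  algorithm.EnqueueBuffer(new media::DataBuffer(
      scoped_array<uint8>(new uint8[kDataSize]), kDataSize));

  // ...then pull rate-adjusted audio back out. At 2x playback roughly half
  // as many bytes come out as were consumed from |audio_buffer_|.
  scoped_array<uint8> dest(new uint8[kDataSize]);
  uint32 filled = algorithm.FillBuffer(dest.get(), kDataSize);
  (void)filled;
}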