author     dalecurtis@google.com <dalecurtis@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2012-12-15 00:47:49 +0000
committer  dalecurtis@google.com <dalecurtis@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2012-12-15 00:47:49 +0000
commit     802ecdb9cee0d66fe546bdf24e98150f8f716ad8 (patch)
tree       4c6580e3681b795a758ce3bcd9a44c3b9045ea0c /media
parent     c86e7a815b8a8b8af67bba66561be1aef1e399a3 (diff)
Protect AudioRendererAlgorithm from invalid step sizes.
BUG=165430
TEST=unittests and asan pass.
Review URL: https://codereview.chromium.org/11573023
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@173249 0039d316-1c4b-4281-b951-d872f2087c98
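
Background on the failure mode: FillBuffer() derives its input/output step sizes from |window_size_| and |playback_rate_| with ceil() and AlignToFrameBoundary(). For rates very close to 1.0 the derived step rounds back up to |window_size_| itself, so the old assumption in OutputFasterPlayback()/OutputSlowerPlayback() that the two steps differ (previously only a DCHECK) no longer held, and the crossfade copy could read or write out of bounds. A minimal sketch of the arithmetic, assuming a hypothetical window of 1000 bytes (illustrative only, not Chromium code):

    #include <cmath>
    #include <cstdio>

    int main() {
      const int window_size = 1000;        // hypothetical window, in bytes
      const double playback_rate = 1.0001;

      // Old OutputFasterPlayback() derivation: input_step was the whole
      // window, output_step was scaled by the rate.
      const int input_step = window_size;
      const int output_step =
          static_cast<int>(ceil(window_size / playback_rate));

      // ceil(1000 / 1.0001) == ceil(999.900...) == 1000, so the two steps
      // are equal and the "input_step > output_step" assumption is violated.
      printf("input_step=%d output_step=%d\n", input_step, output_step);
      return 0;
    }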
Diffstat (limited to 'media')
-rw-r--r--  media/filters/audio_renderer_algorithm.cc           |  40
-rw-r--r--  media/filters/audio_renderer_algorithm.h             |   6
-rw-r--r--  media/filters/audio_renderer_algorithm_unittest.cc   | 103
3 files changed, 69 insertions(+), 80 deletions(-)
diff --git a/media/filters/audio_renderer_algorithm.cc b/media/filters/audio_renderer_algorithm.cc
index 28d48a2..9e84b9b 100644
--- a/media/filters/audio_renderer_algorithm.cc
+++ b/media/filters/audio_renderer_algorithm.cc
@@ -83,6 +83,11 @@ int AudioRendererAlgorithm::FillBuffer(
   if (playback_rate_ == 0.0f)
     return 0;
 
+  int slower_step = ceil(window_size_ * playback_rate_);
+  int faster_step = ceil(window_size_ / playback_rate_);
+  AlignToFrameBoundary(&slower_step);
+  AlignToFrameBoundary(&faster_step);
+
   int total_frames_rendered = 0;
   uint8* output_ptr = dest;
   while (total_frames_rendered < requested_frames) {
@@ -90,12 +95,15 @@ int AudioRendererAlgorithm::FillBuffer(
       ResetWindow();
 
     bool rendered_frame = true;
-    if (playback_rate_ > 1.0)
-      rendered_frame = OutputFasterPlayback(output_ptr);
-    else if (playback_rate_ < 1.0)
-      rendered_frame = OutputSlowerPlayback(output_ptr);
-    else
+    if (window_size_ > faster_step) {
+      rendered_frame = OutputFasterPlayback(
+          output_ptr, window_size_, faster_step);
+    } else if (slower_step < window_size_) {
+      rendered_frame = OutputSlowerPlayback(
+          output_ptr, slower_step, window_size_);
+    } else {
       rendered_frame = OutputNormalPlayback(output_ptr);
+    }
 
     if (!rendered_frame) {
       needs_more_data_ = true;
@@ -114,7 +122,11 @@ void AudioRendererAlgorithm::ResetWindow() {
   crossfade_frame_number_ = 0;
 }
 
-bool AudioRendererAlgorithm::OutputFasterPlayback(uint8* dest) {
+bool AudioRendererAlgorithm::OutputFasterPlayback(uint8* dest,
+                                                  int input_step,
+                                                  int output_step) {
+  // Ensure we don't run into OOB read/write situation.
+  CHECK_GT(input_step, output_step);
   DCHECK_LT(index_into_window_, window_size_);
   DCHECK_GT(playback_rate_, 1.0);
 
@@ -131,11 +143,6 @@ bool AudioRendererAlgorithm::OutputFasterPlayback(uint8* dest) {
   //
   // The duration of each phase is computed below based on the |window_size_|
   // and |playback_rate_|.
-  int input_step = window_size_;
-  int output_step = ceil(window_size_ / playback_rate_);
-  AlignToFrameBoundary(&output_step);
-  DCHECK_GT(input_step, output_step);
-
   int bytes_to_crossfade = bytes_in_crossfade_;
   if (muted_ || bytes_to_crossfade > output_step)
     bytes_to_crossfade = 0;
@@ -203,7 +210,11 @@ bool AudioRendererAlgorithm::OutputFasterPlayback(uint8* dest) {
   return true;
 }
 
-bool AudioRendererAlgorithm::OutputSlowerPlayback(uint8* dest) {
+bool AudioRendererAlgorithm::OutputSlowerPlayback(uint8* dest,
+                                                  int input_step,
+                                                  int output_step) {
+  // Ensure we don't run into OOB read/write situation.
+  CHECK_LT(input_step, output_step);
   DCHECK_LT(index_into_window_, window_size_);
   DCHECK_LT(playback_rate_, 1.0);
   DCHECK_NE(playback_rate_, 0.0);
@@ -224,11 +235,6 @@ bool AudioRendererAlgorithm::OutputSlowerPlayback(uint8* dest) {
   //
   // The duration of each phase is computed below based on the |window_size_|
   // and |playback_rate_|.
-  int input_step = ceil(window_size_ * playback_rate_);
-  AlignToFrameBoundary(&input_step);
-  int output_step = window_size_;
-  DCHECK_LT(input_step, output_step);
-
   int bytes_to_crossfade = bytes_in_crossfade_;
   if (muted_ || bytes_to_crossfade > input_step)
     bytes_to_crossfade = 0;
diff --git a/media/filters/audio_renderer_algorithm.h b/media/filters/audio_renderer_algorithm.h
index 644592c..080a11d 100644
--- a/media/filters/audio_renderer_algorithm.h
+++ b/media/filters/audio_renderer_algorithm.h
@@ -88,6 +88,8 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
 
   int bytes_per_channel() { return bytes_per_channel_; }
 
+  int samples_per_second() { return samples_per_second_; }
+
   bool is_muted() { return muted_; }
 
  private:
@@ -103,7 +105,7 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
   // data at normal speed, then we "fast forward" by dropping the next bit of
   // audio data, and then we stich the pieces together by crossfading from one
   // audio chunk to the next.
-  bool OutputFasterPlayback(uint8* dest);
+  bool OutputFasterPlayback(uint8* dest, int input_step, int output_step);
 
   // Fills |dest| with one frame of audio data at slower than normal speed.
   // Returns true if a frame was rendered, false otherwise.
@@ -114,7 +116,7 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
   // by repeating some of the audio data from the previous audio segment.
   // Segments are stiched together by crossfading from one audio chunk to the
   // next.
-  bool OutputSlowerPlayback(uint8* dest);
+  bool OutputSlowerPlayback(uint8* dest, int input_step, int output_step);
 
   // Resets the window state to the start of a new window.
   void ResetWindow();
diff --git a/media/filters/audio_renderer_algorithm_unittest.cc b/media/filters/audio_renderer_algorithm_unittest.cc
index 6ef2447..33e2fd0 100644
--- a/media/filters/audio_renderer_algorithm_unittest.cc
+++ b/media/filters/audio_renderer_algorithm_unittest.cc
@@ -19,8 +19,8 @@
 
 namespace media {
 
-static const size_t kRawDataSize = 10 * 1024;
-static const int kSamplesPerSecond = 44100;
+static const size_t kRawDataSize = 2048;
+static const int kSamplesPerSecond = 3000;
 static const ChannelLayout kDefaultChannelLayout = CHANNEL_LAYOUT_STEREO;
 static const int kDefaultSampleBits = 16;
@@ -33,13 +33,16 @@ class AudioRendererAlgorithmTest : public testing::Test {
   ~AudioRendererAlgorithmTest() {}
 
   void Initialize() {
-    Initialize(kDefaultChannelLayout, kDefaultSampleBits);
+    Initialize(kDefaultChannelLayout, kDefaultSampleBits, kSamplesPerSecond);
   }
 
-  void Initialize(ChannelLayout channel_layout, int bits_per_channel) {
+  void Initialize(ChannelLayout channel_layout, int bits_per_channel,
+                  int samples_per_second) {
+    static const int kFrames = kRawDataSize / ((kDefaultSampleBits / 8) *
+        ChannelLayoutToChannelCount(kDefaultChannelLayout));
     AudioParameters params(
         media::AudioParameters::AUDIO_PCM_LINEAR, channel_layout,
-        kSamplesPerSecond, bits_per_channel, kRawDataSize);
+        samples_per_second, bits_per_channel, kFrames);
     algorithm_.Initialize(1, params, base::Bind(
         &AudioRendererAlgorithmTest::EnqueueData, base::Unretained(this)));
 
@@ -50,61 +53,22 @@ class AudioRendererAlgorithmTest : public testing::Test {
     scoped_array<uint8> audio_data(new uint8[kRawDataSize]);
     CHECK_EQ(kRawDataSize % algorithm_.bytes_per_channel(), 0u);
     CHECK_EQ(kRawDataSize % algorithm_.bytes_per_frame(), 0u);
-    size_t length = kRawDataSize / algorithm_.bytes_per_channel();
-    switch (algorithm_.bytes_per_channel()) {
-      case 4:
-        WriteFakeData<int32>(audio_data.get(), length);
-        break;
-      case 2:
-        WriteFakeData<int16>(audio_data.get(), length);
-        break;
-      case 1:
-        WriteFakeData<uint8>(audio_data.get(), length);
-        break;
-      default:
-        NOTREACHED() << "Unsupported audio bit depth in crossfade.";
-    }
+    // The value of the data is meaningless; we just want non-zero data to
+    // differentiate it from muted data.
+    memset(audio_data.get(), 1, kRawDataSize);
     algorithm_.EnqueueBuffer(new DataBuffer(audio_data.Pass(), kRawDataSize));
     bytes_enqueued_ += kRawDataSize;
   }
 
-  template <class Type>
-  void WriteFakeData(uint8* audio_data, size_t length) {
-    Type* output = reinterpret_cast<Type*>(audio_data);
-    for (size_t i = 0; i < length; i++) {
-      // The value of the data is meaningless; we just want non-zero data to
-      // differentiate it from muted data.
-      output[i] = i % 5 + 10;
-    }
-  }
+  void CheckFakeData(uint8* audio_data, int frames_written) {
+    int sum = 0;
+    for (int i = 0; i < frames_written * algorithm_.bytes_per_frame(); ++i)
+      sum |= audio_data[i];
 
-  void CheckFakeData(uint8* audio_data, int frames_written,
-                     double playback_rate) {
-    size_t length =
-        (frames_written * algorithm_.bytes_per_frame())
-        / algorithm_.bytes_per_channel();
-
-    switch (algorithm_.bytes_per_channel()) {
-      case 4:
-        DoCheckFakeData<int32>(audio_data, length);
-        break;
-      case 2:
-        DoCheckFakeData<int16>(audio_data, length);
-        break;
-      case 1:
-        DoCheckFakeData<uint8>(audio_data, length);
-        break;
-      default:
-        NOTREACHED() << "Unsupported audio bit depth in crossfade.";
-    }
-  }
-
-  template <class Type>
-  void DoCheckFakeData(uint8* audio_data, size_t length) {
-    Type* output = reinterpret_cast<Type*>(audio_data);
-    for (size_t i = 0; i < length; i++) {
-      EXPECT_TRUE(algorithm_.is_muted() || output[i] != 0);
-    }
+    if (algorithm_.is_muted())
+      ASSERT_EQ(sum, 0);
+    else
+      ASSERT_NE(sum, 0);
   }
 
   int ComputeConsumedBytes(int initial_bytes_enqueued,
@@ -117,8 +81,8 @@ class AudioRendererAlgorithmTest : public testing::Test {
   }
 
   void TestPlaybackRate(double playback_rate) {
-    static const int kDefaultBufferSize = kSamplesPerSecond / 10;
-    static const int kDefaultFramesRequested = 5 * kSamplesPerSecond;
+    const int kDefaultBufferSize = algorithm_.samples_per_second() / 10;
+    const int kDefaultFramesRequested = 2 * algorithm_.samples_per_second();
     TestPlaybackRate(playback_rate,
                      kDefaultBufferSize,
                      kDefaultFramesRequested);
@@ -147,8 +111,8 @@ class AudioRendererAlgorithmTest : public testing::Test {
      int frames_requested = std::min(buffer_size_in_frames, frames_remaining);
      int frames_written = algorithm_.FillBuffer(buffer.get(), frames_requested);
-      CHECK_GT(frames_written, 0);
-      CheckFakeData(buffer.get(), frames_written, playback_rate);
+      ASSERT_GT(frames_written, 0);
+      CheckFakeData(buffer.get(), frames_written);
 
      frames_remaining -= frames_written;
     }
 
@@ -191,6 +155,16 @@ TEST_F(AudioRendererAlgorithmTest, FillBuffer_NormalRate) {
   TestPlaybackRate(1.0);
 }
 
+TEST_F(AudioRendererAlgorithmTest, FillBuffer_NearlyNormalFasterRate) {
+  Initialize();
+  TestPlaybackRate(1.0001);
+}
+
+TEST_F(AudioRendererAlgorithmTest, FillBuffer_NearlyNormalSlowerRate) {
+  Initialize();
+  TestPlaybackRate(0.9999);
+}
+
 TEST_F(AudioRendererAlgorithmTest, FillBuffer_OneAndAQuarterRate) {
   Initialize();
   TestPlaybackRate(1.25);
@@ -269,10 +243,17 @@ TEST_F(AudioRendererAlgorithmTest, FillBuffer_SmallBufferSize) {
   TestPlaybackRate(1.5, kBufferSizeInFrames, kFramesRequested);
 }
 
+TEST_F(AudioRendererAlgorithmTest, FillBuffer_LargeBufferSize) {
+  Initialize(kDefaultChannelLayout, kDefaultSampleBits, 44100);
+  TestPlaybackRate(1.0);
+  TestPlaybackRate(0.5);
+  TestPlaybackRate(1.5);
+}
+
 TEST_F(AudioRendererAlgorithmTest, FillBuffer_LowerQualityAudio) {
   static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_MONO;
   static const int kSampleBits = 8;
-  Initialize(kChannelLayout, kSampleBits);
+  Initialize(kChannelLayout, kSampleBits, kSamplesPerSecond);
   TestPlaybackRate(1.0);
   TestPlaybackRate(0.5);
   TestPlaybackRate(1.5);
@@ -281,7 +262,7 @@ TEST_F(AudioRendererAlgorithmTest, FillBuffer_LowerQualityAudio) {
 TEST_F(AudioRendererAlgorithmTest, FillBuffer_HigherQualityAudio) {
   static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
   static const int kSampleBits = 32;
-  Initialize(kChannelLayout, kSampleBits);
+  Initialize(kChannelLayout, kSampleBits, kSamplesPerSecond);
   TestPlaybackRate(1.0);
   TestPlaybackRate(0.5);
   TestPlaybackRate(1.5);
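
With this patch, FillBuffer() computes both candidate steps once per call and only enters the fast or slow path when the corresponding strict inequality against |window_size_| holds; rates near 1.0 fall through to normal playback, and the new CHECK_GT/CHECK_LT make the invariant fatal in release builds rather than a debug-only DCHECK. A simplified sketch of that selection logic (not the actual Chromium implementation; AlignToFrameBoundary() is approximated here as rounding down to a frame boundary):

    #include <cmath>

    enum PlaybackPath { kFasterPath, kSlowerPath, kNormalPath };

    // Approximation of AlignToFrameBoundary(): round |bytes| down to a
    // multiple of |bytes_per_frame|.
    static int AlignDown(int bytes, int bytes_per_frame) {
      return bytes - (bytes % bytes_per_frame);
    }

    PlaybackPath ChoosePath(int window_size, double playback_rate,
                            int bytes_per_frame) {
      int slower_step = AlignDown(
          static_cast<int>(ceil(window_size * playback_rate)), bytes_per_frame);
      int faster_step = AlignDown(
          static_cast<int>(ceil(window_size / playback_rate)), bytes_per_frame);

      if (window_size > faster_step)
        return kFasterPath;  // input_step > output_step is guaranteed.
      if (slower_step < window_size)
        return kSlowerPath;  // input_step < output_step is guaranteed.
      return kNormalPath;    // Rates near 1.0 (e.g. 1.0001) land here.
    }

The new FillBuffer_NearlyNormalFasterRate and FillBuffer_NearlyNormalSlowerRate tests exercise exactly these near-1.0 rates.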