summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authordalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-03-04 19:24:48 +0000
committerdalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-03-04 19:24:48 +0000
commit08a4485277777c9f9dbfdec32275acc89631255d (patch)
treef98fe879d7290d8f5a05bd3a4031ed605f432c59
parent41ef985af8825464054b945c876a81ec82ab34e0 (diff)
downloadchromium_src-08a4485277777c9f9dbfdec32275acc89631255d.zip
chromium_src-08a4485277777c9f9dbfdec32275acc89631255d.tar.gz
chromium_src-08a4485277777c9f9dbfdec32275acc89631255d.tar.bz2
Cleanup Make(Planar|Interleaved)AudioBuffer functions.
Removes an unnecessary copy now that we have support for writing into existing AudioBuffer objects. BUG=none TEST=unittests still pass. NOTRY=true Review URL: https://codereview.chromium.org/183513007 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@254798 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--media/base/audio_buffer_queue_unittest.cc78
-rw-r--r--media/base/audio_buffer_unittest.cc40
-rw-r--r--media/base/audio_splicer_unittest.cc2
-rw-r--r--media/base/test_helpers.cc114
-rw-r--r--media/base/test_helpers.h44
-rw-r--r--media/filters/audio_renderer_algorithm_unittest.cc42
-rw-r--r--media/filters/audio_renderer_impl_unittest.cc14
7 files changed, 142 insertions, 192 deletions
diff --git a/media/base/audio_buffer_queue_unittest.cc b/media/base/audio_buffer_queue_unittest.cc
index b95bdca..b765009 100644
--- a/media/base/audio_buffer_queue_unittest.cc
+++ b/media/base/audio_buffer_queue_unittest.cc
@@ -34,12 +34,12 @@ TEST(AudioBufferQueueTest, AppendAndClear) {
const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
EXPECT_EQ(0, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(frames, buffer.frames());
buffer.Clear();
EXPECT_EQ(0, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 20, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(frames, buffer.frames());
}
@@ -51,19 +51,19 @@ TEST(AudioBufferQueueTest, MultipleAppend) {
AudioBufferQueue buffer;
// Append 40 frames in 5 buffers.
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(8, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(16, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(32, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(40, buffer.frames());
}
@@ -77,7 +77,7 @@ TEST(AudioBufferQueueTest, IteratorCheck) {
// Append 40 frames in 5 buffers. Intersperse ReadFrames() to make the
// iterator is pointing to the correct position.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 10.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(8, buffer.frames());
@@ -85,10 +85,10 @@ TEST(AudioBufferQueueTest, IteratorCheck) {
EXPECT_EQ(4, buffer.frames());
VerifyResult(bus->channel(0), 4, 10.0f, 1.0f);
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 20.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(12, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 30.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(20, buffer.frames());
@@ -97,10 +97,10 @@ TEST(AudioBufferQueueTest, IteratorCheck) {
EXPECT_EQ(0, buffer.frames());
VerifyResult(bus->channel(0), 4, 34.0f, 1.0f);
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 40.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(8, buffer.frames());
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 50.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(16, buffer.frames());
@@ -121,7 +121,7 @@ TEST(AudioBufferQueueTest, Seek) {
AudioBufferQueue buffer;
// Add 6 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 1.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(6, buffer.frames());
@@ -143,11 +143,11 @@ TEST(AudioBufferQueueTest, ReadF32) {
AudioBufferQueue buffer;
// Add 76 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 1.0f, 1.0f, 6, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 13.0f, 1.0f, 10, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 33.0f, 1.0f, 60, kNoTime, kNoTime));
EXPECT_EQ(76, buffer.frames());
@@ -182,7 +182,7 @@ TEST(AudioBufferQueueTest, ReadU8) {
AudioBufferQueue buffer;
// Add 4 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ buffer.Append(MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 128, 1, frames, kNoTime, kNoTime));
// Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
@@ -204,9 +204,9 @@ TEST(AudioBufferQueueTest, ReadS16) {
AudioBufferQueue buffer;
// Add 24 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, 4, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 9, 1, 20, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
@@ -226,9 +226,9 @@ TEST(AudioBufferQueueTest, ReadS32) {
AudioBufferQueue buffer;
// Add 24 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<int32>(
+ buffer.Append(MakeAudioBuffer<int32>(
kSampleFormatS32, channels, 1, 1, 4, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<int32>(
+ buffer.Append(MakeAudioBuffer<int32>(
kSampleFormatS32, channels, 9, 1, 20, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
@@ -254,9 +254,9 @@ TEST(AudioBufferQueueTest, ReadF32Planar) {
AudioBufferQueue buffer;
// Add 14 frames of data.
- buffer.Append(MakePlanarAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatPlanarF32, channels, 1.0f, 1.0f, 4, kNoTime, kNoTime));
- buffer.Append(MakePlanarAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatPlanarF32, channels, 50.0f, 1.0f, 10, kNoTime, kNoTime));
EXPECT_EQ(14, buffer.frames());
@@ -277,9 +277,9 @@ TEST(AudioBufferQueueTest, ReadS16Planar) {
AudioBufferQueue buffer;
// Add 24 frames of data.
- buffer.Append(MakePlanarAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatPlanarS16, channels, 1, 1, 4, kNoTime, kNoTime));
- buffer.Append(MakePlanarAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatPlanarS16, channels, 100, 5, 20, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
@@ -301,17 +301,17 @@ TEST(AudioBufferQueueTest, ReadManyChannels) {
AudioBufferQueue buffer;
// Add 76 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 0.0f, 1.0f, 6, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 6.0f * channels, 1.0f, 10, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<float>(kSampleFormatF32,
- channels,
- 16.0f * channels,
- 1.0f,
- 60,
- kNoTime,
- kNoTime));
+ buffer.Append(MakeAudioBuffer<float>(kSampleFormatF32,
+ channels,
+ 16.0f * channels,
+ 1.0f,
+ 60,
+ kNoTime,
+ kNoTime));
EXPECT_EQ(76, buffer.frames());
// Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be
@@ -330,7 +330,7 @@ TEST(AudioBufferQueueTest, Peek) {
AudioBufferQueue buffer;
// Add 60 frames of data.
- buffer.Append(MakeInterleavedAudioBuffer<float>(
+ buffer.Append(MakeAudioBuffer<float>(
kSampleFormatF32, channels, 0.0f, 1.0f, 60, kNoTime, kNoTime));
EXPECT_EQ(60, buffer.frames());
@@ -381,7 +381,7 @@ TEST(AudioBufferQueueTest, Time) {
// Add two buffers (second one added later):
// first: start=0s, duration=10s
// second: start=30s, duration=10s
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, 10, start_time1, duration));
EXPECT_EQ(10, buffer.frames());
@@ -399,7 +399,7 @@ TEST(AudioBufferQueueTest, Time) {
buffer.current_time());
// Add second buffer for more data.
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, 10, start_time2, duration));
EXPECT_EQ(16, buffer.frames());
@@ -430,9 +430,9 @@ TEST(AudioBufferQueueTest, NoTime) {
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
// Add two buffers with no timestamps. Time should always be unknown.
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, 10, kNoTime, kNoTime));
- buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ buffer.Append(MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, 10, kNoTime, kNoTime));
EXPECT_EQ(20, buffer.frames());
diff --git a/media/base/audio_buffer_unittest.cc b/media/base/audio_buffer_unittest.cc
index 473778a..15f6416 100644
--- a/media/base/audio_buffer_unittest.cc
+++ b/media/base/audio_buffer_unittest.cc
@@ -28,7 +28,7 @@ TEST(AudioBufferTest, CopyFrom) {
const int frames = 8;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 1, 1, frames, start_time, duration);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(buffer->timestamp(), start_time);
@@ -63,7 +63,7 @@ TEST(AudioBufferTest, ReadU8) {
const int frames = 4;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<uint8>(
kSampleFormatU8, channels, 128, 1, frames, start_time, duration);
// Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
@@ -83,7 +83,7 @@ TEST(AudioBufferTest, ReadS16) {
const int frames = 10;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int16>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, frames, start_time, duration);
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
@@ -108,7 +108,7 @@ TEST(AudioBufferTest, ReadS32) {
const int frames = 6;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int32>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<int32>(
kSampleFormatS32, channels, 1, 1, frames, start_time, duration);
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
@@ -131,7 +131,7 @@ TEST(AudioBufferTest, ReadF32) {
const int frames = 20;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<float>(
kSampleFormatF32, channels, 1.0f, 1.0f, frames, start_time, duration);
// Read first 10 frames from the buffer. F32 is interleaved, so ch[0] should
@@ -153,7 +153,7 @@ TEST(AudioBufferTest, ReadS16Planar) {
const int frames = 20;
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer = MakePlanarAudioBuffer<int16>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<int16>(
kSampleFormatPlanarS16, channels, 1, 1, frames, start_time, duration);
// Read 6 frames from the buffer. Data is planar, so ch[0] should be 1, 2, 3,
@@ -187,13 +187,13 @@ TEST(AudioBufferTest, ReadF32Planar) {
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer =
- MakePlanarAudioBuffer<float>(kSampleFormatPlanarF32,
- channels,
- 1.0f,
- 1.0f,
- frames,
- start_time,
- duration);
+ MakeAudioBuffer<float>(kSampleFormatPlanarF32,
+ channels,
+ 1.0f,
+ 1.0f,
+ frames,
+ start_time,
+ duration);
// Read all 100 frames from the buffer. F32 is planar, so ch[0] should be 1,
// 2, 3, 4, ..., ch[1] should be 101, 102, 103, ..., and so on for all 4
@@ -241,13 +241,13 @@ TEST(AudioBufferTest, Trim) {
const base::TimeDelta start_time;
const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer =
- MakePlanarAudioBuffer<float>(kSampleFormatPlanarF32,
- channels,
- 1.0f,
- 1.0f,
- frames,
- start_time,
- duration);
+ MakeAudioBuffer<float>(kSampleFormatPlanarF32,
+ channels,
+ 1.0f,
+ 1.0f,
+ frames,
+ start_time,
+ duration);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(start_time, buffer->timestamp());
EXPECT_EQ(frames, buffer->duration().InSeconds());
diff --git a/media/base/audio_splicer_unittest.cc b/media/base/audio_splicer_unittest.cc
index 998a9a3..0acd37e 100644
--- a/media/base/audio_splicer_unittest.cc
+++ b/media/base/audio_splicer_unittest.cc
@@ -34,7 +34,7 @@ class AudioSplicerTest : public ::testing::Test {
}
scoped_refptr<AudioBuffer> GetNextInputBuffer(float value, int frame_size) {
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
+ scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<float>(
kSampleFormat,
kChannels,
value,
diff --git a/media/base/test_helpers.cc b/media/base/test_helpers.cc
index 672f8c2..57ac40d 100644
--- a/media/base/test_helpers.cc
+++ b/media/base/test_helpers.cc
@@ -149,100 +149,62 @@ gfx::Size TestVideoConfig::LargeCodedSize() {
}
template <class T>
-scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration) {
- DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 ||
- format == kSampleFormatS32 || format == kSampleFormatF32);
+scoped_refptr<AudioBuffer> MakeAudioBuffer(SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta timestamp,
+ base::TimeDelta duration) {
+ scoped_refptr<AudioBuffer> output =
+ AudioBuffer::CreateBuffer(format, channels, frames);
+ output->set_timestamp(timestamp);
+ output->set_duration(duration);
// Create a block of memory with values:
// start
// start + increment
// start + 2 * increment, ...
- // Since this is interleaved data, channel 0 data will be:
+ // For interleaved data, raw data will be:
// start
// start + channels * increment
// start + 2 * channels * increment, ...
- int buffer_size = frames * channels * sizeof(T);
- scoped_ptr<uint8[]> memory(new uint8[buffer_size]);
- uint8* data[] = { memory.get() };
- T* buffer = reinterpret_cast<T*>(memory.get());
- for (int i = 0; i < frames * channels; ++i) {
- buffer[i] = start;
- start += increment;
- }
- return AudioBuffer::CopyFrom(
- format, channels, frames, data, start_time, duration);
-}
-
-template <class T>
-scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration) {
- DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16);
-
- // Create multiple blocks of data, one for each channel.
- // Values in channel 0 will be:
+ //
+ // For planar data, values in channel 0 will be:
// start
// start + increment
// start + 2 * increment, ...
- // Values in channel 1 will be:
+ // While, values in channel 1 will be:
// start + frames * increment
// start + (frames + 1) * increment
// start + (frames + 2) * increment, ...
- int buffer_size = frames * sizeof(T);
- scoped_ptr<uint8*[]> data(new uint8*[channels]);
- scoped_ptr<uint8[]> memory(new uint8[channels * buffer_size]);
- for (int i = 0; i < channels; ++i) {
- data.get()[i] = memory.get() + i * buffer_size;
- T* buffer = reinterpret_cast<T*>(data.get()[i]);
- for (int j = 0; j < frames; ++j) {
- buffer[j] = start;
+ const size_t output_size =
+ output->channel_data().size() == 1 ? frames * channels : frames;
+ for (size_t ch = 0; ch < output->channel_data().size(); ++ch) {
+ T* buffer = reinterpret_cast<T*>(output->channel_data()[ch]);
+ for (size_t i = 0; i < output_size; ++i) {
+ buffer[i] = start;
start += increment;
}
}
- return AudioBuffer::CopyFrom(
- format, channels, frames, data.get(), start_time, duration);
-}
-
-// Instantiate all the types of MakeInterleavedAudioBuffer() and
-// MakePlanarAudioBuffer() needed.
-
-#define DEFINE_INTERLEAVED_INSTANCE(type) \
- template scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer<type>( \
- SampleFormat format, \
- int channels, \
- type start, \
- type increment, \
- int frames, \
- base::TimeDelta start_time, \
+ return output;
+}
+
+// Instantiate all the types of MakeAudioBuffer()
+// that are needed.
+#define DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(type) \
+ template scoped_refptr<AudioBuffer> MakeAudioBuffer<type>( \
+ SampleFormat format, \
+ int channels, \
+ type start, \
+ type increment, \
+ int frames, \
+ base::TimeDelta start_time, \
base::TimeDelta duration)
-DEFINE_INTERLEAVED_INSTANCE(uint8);
-DEFINE_INTERLEAVED_INSTANCE(int16);
-DEFINE_INTERLEAVED_INSTANCE(int32);
-DEFINE_INTERLEAVED_INSTANCE(float);
-
-#define DEFINE_PLANAR_INSTANCE(type) \
- template scoped_refptr<AudioBuffer> MakePlanarAudioBuffer<type>( \
- SampleFormat format, \
- int channels, \
- type start, \
- type increment, \
- int frames, \
- base::TimeDelta start_time, \
- base::TimeDelta duration);
-DEFINE_PLANAR_INSTANCE(int16);
-DEFINE_PLANAR_INSTANCE(float);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(uint8);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(int16);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(int32);
+DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(float);
static const char kFakeVideoBufferHeader[] = "FakeVideoBufferForTest";
diff --git a/media/base/test_helpers.h b/media/base/test_helpers.h
index 872d08d..ee18f53 100644
--- a/media/base/test_helpers.h
+++ b/media/base/test_helpers.h
@@ -85,9 +85,11 @@ class TestVideoConfig {
};
// Create an AudioBuffer containing |frames| frames of data, where each sample
-// is of type T. Each frame will have the data from |channels| channels
-// interleaved. |start| and |increment| are used to specify the values for the
-// samples. Since this is interleaved data, channel 0 data will be:
+// is of type T.
+//
+// For interleaved formats, each frame will have the data from |channels|
+// channels interleaved. |start| and |increment| are used to specify the values
+// for the samples. Since this is interleaved data, channel 0 data will be:
// |start|
// |start| + |channels| * |increment|
// |start| + 2 * |channels| * |increment|, and so on.
@@ -95,23 +97,10 @@ class TestVideoConfig {
// requires data to be of type T, but it is verified that |format| is an
// interleaved format.
//
-// |start_time| will be used as the start time for the samples. |duration| is
-// the duration.
-template <class T>
-scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration);
-
-// Create an AudioBuffer containing |frames| frames of data, where each sample
-// is of type T. Since this is planar data, there will be a block for each of
-// |channel| channels. |start| and |increment| are used to specify the values
-// for the samples, which are created in channel order. Since this is planar
-// data, channel 0 data will be:
+// For planar formats, there will be a block for each of |channel| channels.
+// |start| and |increment| are used to specify the values for the samples, which
+// are created in channel order. Since this is planar data, channel 0 data will
+// be:
// |start|
// |start| + |increment|
// |start| + 2 * |increment|, and so on.
@@ -122,14 +111,13 @@ scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
// |start_time| will be used as the start time for the samples. |duration| is
// the duration.
template <class T>
-scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- base::TimeDelta start_time,
- base::TimeDelta duration);
+scoped_refptr<AudioBuffer> MakeAudioBuffer(SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta timestamp,
+ base::TimeDelta duration);
// Create a fake video DecoderBuffer for testing purpose. The buffer contains
// part of video decoder config info embedded so that the testing code can do
diff --git a/media/filters/audio_renderer_algorithm_unittest.cc b/media/filters/audio_renderer_algorithm_unittest.cc
index aab4a9d..b05e64a 100644
--- a/media/filters/audio_renderer_algorithm_unittest.cc
+++ b/media/filters/audio_renderer_algorithm_unittest.cc
@@ -101,31 +101,31 @@ class AudioRendererAlgorithmTest : public testing::Test {
while (!algorithm_.IsQueueFull()) {
switch (sample_format_) {
case kSampleFormatU8:
- buffer = MakeInterleavedAudioBuffer<uint8>(sample_format_,
- channels_,
- 1,
- 1,
- kFrameSize,
- kNoTimestamp(),
- kNoTimestamp());
+ buffer = MakeAudioBuffer<uint8>(sample_format_,
+ channels_,
+ 1,
+ 1,
+ kFrameSize,
+ kNoTimestamp(),
+ kNoTimestamp());
break;
case kSampleFormatS16:
- buffer = MakeInterleavedAudioBuffer<int16>(sample_format_,
- channels_,
- 1,
- 1,
- kFrameSize,
- kNoTimestamp(),
- kNoTimestamp());
+ buffer = MakeAudioBuffer<int16>(sample_format_,
+ channels_,
+ 1,
+ 1,
+ kFrameSize,
+ kNoTimestamp(),
+ kNoTimestamp());
break;
case kSampleFormatS32:
- buffer = MakeInterleavedAudioBuffer<int32>(sample_format_,
- channels_,
- 1,
- 1,
- kFrameSize,
- kNoTimestamp(),
- kNoTimestamp());
+ buffer = MakeAudioBuffer<int32>(sample_format_,
+ channels_,
+ 1,
+ 1,
+ kFrameSize,
+ kNoTimestamp(),
+ kNoTimestamp());
break;
default:
NOTREACHED() << "Unrecognized format " << sample_format_;
diff --git a/media/filters/audio_renderer_impl_unittest.cc b/media/filters/audio_renderer_impl_unittest.cc
index c84ccba..ef215a0 100644
--- a/media/filters/audio_renderer_impl_unittest.cc
+++ b/media/filters/audio_renderer_impl_unittest.cc
@@ -302,13 +302,13 @@ class AudioRendererImplTest : public ::testing::Test {
CHECK(!read_cb_.is_null());
scoped_refptr<AudioBuffer> buffer =
- MakePlanarAudioBuffer<float>(kSampleFormat,
- kChannels,
- kPlayingAudio,
- 0.0f,
- size,
- next_timestamp_->GetTimestamp(),
- next_timestamp_->GetFrameDuration(size));
+ MakeAudioBuffer<float>(kSampleFormat,
+ kChannels,
+ kPlayingAudio,
+ 0.0f,
+ size,
+ next_timestamp_->GetTimestamp(),
+ next_timestamp_->GetFrameDuration(size));
next_timestamp_->AddFrames(size);
DeliverBuffer(AudioDecoder::kOk, buffer);