summaryrefslogtreecommitdiffstats
path: root/media
diff options
context:
space:
mode:
authordalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-05-02 09:55:49 +0000
committerdalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-05-02 09:55:49 +0000
commitc1674a529fb13fa97fa108a0dd5d7f93198a5e25 (patch)
treeca42a015934f5bcd4d48ff0f40ebbf95d61eae44 /media
parent7bd79e2ef968908125e7c5baf98e9be422eb39ea (diff)
downloadchromium_src-c1674a529fb13fa97fa108a0dd5d7f93198a5e25.zip
chromium_src-c1674a529fb13fa97fa108a0dd5d7f93198a5e25.tar.gz
chromium_src-c1674a529fb13fa97fa108a0dd5d7f93198a5e25.tar.bz2
Remove AudioBuffer::set_duration(), instead base on frames.
wolenetz@ asked on https://codereview.chromium.org/251893002/ why we allow set_duration() to be called. It used to be required since AudioBuffers had no concept of sample rate. However, we now attach a sample rate to every AudioBuffer which allows us to enforce a valid duration for every buffer. This CL also aligns MakeAudioBuffer() to do the same thing with planar and interleaved data for the sake of easier testing. BUG=none TEST=media_unittests NOTRY=true Review URL: https://codereview.chromium.org/261533002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@267779 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r--media/base/audio_buffer.cc50
-rw-r--r--media/base/audio_buffer.h12
-rw-r--r--media/base/audio_buffer_converter.cc2
-rw-r--r--media/base/audio_buffer_converter_unittest.cc1
-rw-r--r--media/base/audio_buffer_queue_unittest.cc216
-rw-r--r--media/base/audio_buffer_unittest.cc298
-rw-r--r--media/base/audio_discard_helper.cc4
-rw-r--r--media/base/audio_discard_helper_unittest.cc1
-rw-r--r--media/base/audio_splicer.cc33
-rw-r--r--media/base/audio_splicer_unittest.cc35
-rw-r--r--media/base/test_helpers.cc53
-rw-r--r--media/base/test_helpers.h43
-rw-r--r--media/filters/audio_renderer_algorithm_unittest.cc3
-rw-r--r--media/filters/audio_renderer_impl_unittest.cc3
-rw-r--r--media/filters/decrypting_audio_decoder.cc2
-rw-r--r--media/filters/decrypting_audio_decoder_unittest.cc3
-rw-r--r--media/filters/ffmpeg_audio_decoder_unittest.cc2
17 files changed, 315 insertions, 446 deletions
diff --git a/media/base/audio_buffer.cc b/media/base/audio_buffer.cc
index 4b972b9..25e8dbe 100644
--- a/media/base/audio_buffer.cc
+++ b/media/base/audio_buffer.cc
@@ -11,6 +11,11 @@
namespace media {
+static base::TimeDelta CalculateDuration(int frames, double sample_rate) {
+ return base::TimeDelta::FromMicroseconds(
+ frames * base::Time::kMicrosecondsPerSecond / sample_rate);
+}
+
AudioBuffer::AudioBuffer(SampleFormat sample_format,
ChannelLayout channel_layout,
int channel_count,
@@ -18,8 +23,7 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
int frame_count,
bool create_buffer,
const uint8* const* data,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration)
+ const base::TimeDelta timestamp)
: sample_format_(sample_format),
channel_layout_(channel_layout),
channel_count_(channel_count),
@@ -28,7 +32,7 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
trim_start_(0),
end_of_stream_(!create_buffer && data == NULL && frame_count == 0),
timestamp_(timestamp),
- duration_(duration) {
+ duration_(CalculateDuration(adjusted_frame_count_, sample_rate_)) {
CHECK_GE(channel_count_, 0);
CHECK_LE(channel_count_, limits::kMaxChannels);
CHECK_GE(frame_count, 0);
@@ -91,8 +95,7 @@ scoped_refptr<AudioBuffer> AudioBuffer::CopyFrom(
int sample_rate,
int frame_count,
const uint8* const* data,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration) {
+ const base::TimeDelta timestamp) {
// If you hit this CHECK you likely have a bug in a demuxer. Go fix it.
CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
CHECK(data[0]);
@@ -103,8 +106,7 @@ scoped_refptr<AudioBuffer> AudioBuffer::CopyFrom(
frame_count,
true,
data,
- timestamp,
- duration));
+ timestamp));
}
// static
@@ -122,7 +124,6 @@ scoped_refptr<AudioBuffer> AudioBuffer::CreateBuffer(
frame_count,
true,
NULL,
- kNoTimestamp(),
kNoTimestamp()));
}
@@ -132,8 +133,7 @@ scoped_refptr<AudioBuffer> AudioBuffer::CreateEmptyBuffer(
int channel_count,
int sample_rate,
int frame_count,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration) {
+ const base::TimeDelta timestamp) {
CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
// Since data == NULL, format doesn't matter.
return make_scoped_refptr(new AudioBuffer(kSampleFormatF32,
@@ -143,8 +143,7 @@ scoped_refptr<AudioBuffer> AudioBuffer::CreateEmptyBuffer(
frame_count,
false,
NULL,
- timestamp,
- duration));
+ timestamp));
}
// static
@@ -156,7 +155,6 @@ scoped_refptr<AudioBuffer> AudioBuffer::CreateEOSBuffer() {
0,
false,
NULL,
- kNoTimestamp(),
kNoTimestamp()));
}
@@ -246,33 +244,23 @@ void AudioBuffer::TrimStart(int frames_to_trim) {
CHECK_GE(frames_to_trim, 0);
CHECK_LE(frames_to_trim, adjusted_frame_count_);
- // Adjust timestamp_ and duration_ to reflect the smaller number of frames.
- double offset = static_cast<double>(duration_.InMicroseconds()) *
- frames_to_trim / adjusted_frame_count_;
- base::TimeDelta offset_as_time =
- base::TimeDelta::FromMicroseconds(static_cast<int64>(offset));
- timestamp_ += offset_as_time;
- duration_ -= offset_as_time;
-
- // Finally adjust the number of frames in this buffer and where the start
- // really is.
+ // Adjust the number of frames in this buffer and where the start really is.
adjusted_frame_count_ -= frames_to_trim;
trim_start_ += frames_to_trim;
+
+ // Adjust timestamp_ and duration_ to reflect the smaller number of frames.
+ const base::TimeDelta old_duration = duration_;
+ duration_ = CalculateDuration(adjusted_frame_count_, sample_rate_);
+ timestamp_ += old_duration - duration_;
}
void AudioBuffer::TrimEnd(int frames_to_trim) {
CHECK_GE(frames_to_trim, 0);
CHECK_LE(frames_to_trim, adjusted_frame_count_);
- // Adjust duration_ only to reflect the smaller number of frames.
- double offset = static_cast<double>(duration_.InMicroseconds()) *
- frames_to_trim / adjusted_frame_count_;
- base::TimeDelta offset_as_time =
- base::TimeDelta::FromMicroseconds(static_cast<int64>(offset));
- duration_ -= offset_as_time;
-
- // Finally adjust the number of frames in this buffer.
+ // Adjust the number of frames and duration for this buffer.
adjusted_frame_count_ -= frames_to_trim;
+ duration_ = CalculateDuration(adjusted_frame_count_, sample_rate_);
}
} // namespace media
diff --git a/media/base/audio_buffer.h b/media/base/audio_buffer.h
index 5d7ab7f..4ccd3a8f 100644
--- a/media/base/audio_buffer.h
+++ b/media/base/audio_buffer.h
@@ -34,16 +34,13 @@ class MEDIA_EXPORT AudioBuffer
// number of buffers must be equal to |channel_count|. |frame_count| is the
// number of frames in each buffer. |data| must not be null and |frame_count|
// must be >= 0.
- //
- // TODO(jrummell): Compute duration rather than pass it in.
static scoped_refptr<AudioBuffer> CopyFrom(SampleFormat sample_format,
ChannelLayout channel_layout,
int channel_count,
int sample_rate,
int frame_count,
const uint8* const* data,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration);
+ const base::TimeDelta timestamp);
// Create an AudioBuffer with |frame_count| frames. Buffer is allocated, but
// not initialized. Timestamp and duration are set to kNoTimestamp().
@@ -59,8 +56,7 @@ class MEDIA_EXPORT AudioBuffer
int channel_count,
int sample_rate,
int frame_count,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration);
+ const base::TimeDelta timestamp);
// Create a AudioBuffer indicating we've reached end of stream.
// Calling any method other than end_of_stream() on the resulting buffer
@@ -102,7 +98,6 @@ class MEDIA_EXPORT AudioBuffer
base::TimeDelta timestamp() const { return timestamp_; }
base::TimeDelta duration() const { return duration_; }
void set_timestamp(base::TimeDelta timestamp) { timestamp_ = timestamp; }
- void set_duration(base::TimeDelta duration) { duration_ = duration; }
// If there's no data in this buffer, it represents end of stream.
bool end_of_stream() const { return end_of_stream_; }
@@ -126,8 +121,7 @@ class MEDIA_EXPORT AudioBuffer
int frame_count,
bool create_buffer,
const uint8* const* data,
- const base::TimeDelta timestamp,
- const base::TimeDelta duration);
+ const base::TimeDelta timestamp);
virtual ~AudioBuffer();
diff --git a/media/base/audio_buffer_converter.cc b/media/base/audio_buffer_converter.cc
index 74e570d..59c6681 100644
--- a/media/base/audio_buffer_converter.cc
+++ b/media/base/audio_buffer_converter.cc
@@ -227,8 +227,6 @@ void AudioBufferConverter::ConvertIfPossible() {
// Compute the timestamp.
output_buffer->set_timestamp(timestamp_helper_.GetTimestamp());
- output_buffer->set_duration(
- timestamp_helper_.GetFrameDuration(request_frames));
timestamp_helper_.AddFrames(request_frames);
queued_outputs_.push_back(output_buffer);
diff --git a/media/base/audio_buffer_converter_unittest.cc b/media/base/audio_buffer_converter_unittest.cc
index c5c816c..3445996 100644
--- a/media/base/audio_buffer_converter_unittest.cc
+++ b/media/base/audio_buffer_converter_unittest.cc
@@ -29,7 +29,6 @@ static scoped_refptr<AudioBuffer> MakeTestBuffer(int sample_rate,
0,
1,
frames,
- base::TimeDelta::FromSeconds(0),
base::TimeDelta::FromSeconds(0));
}
diff --git a/media/base/audio_buffer_queue_unittest.cc b/media/base/audio_buffer_queue_unittest.cc
index fc04857..e6a148a 100644
--- a/media/base/audio_buffer_queue_unittest.cc
+++ b/media/base/audio_buffer_queue_unittest.cc
@@ -5,7 +5,6 @@
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_buffer_queue.h"
@@ -18,15 +17,18 @@ namespace media {
const int kSampleRate = 44100;
-static void VerifyResult(float* channel_data,
- int frames,
- float start,
- float increment) {
- for (int i = 0; i < frames; ++i) {
- SCOPED_TRACE(base::StringPrintf(
- "i=%d/%d start=%f, increment=%f", i, frames, start, increment));
- ASSERT_EQ(start, channel_data[i]);
- start += increment;
+static void VerifyBus(AudioBus* bus,
+ int offset,
+ int frames,
+ int buffer_size,
+ float start,
+ float increment) {
+ for (int ch = 0; ch < bus->channels(); ++ch) {
+ const float v = start + ch * buffer_size * increment;
+ for (int i = offset; i < frames; ++i) {
+ ASSERT_FLOAT_EQ(v + (i - offset) * increment, bus->channel(ch)[i])
+ << "i=" << i << ", ch=" << ch;
+ }
}
}
@@ -34,18 +36,16 @@ template <typename T>
static scoped_refptr<AudioBuffer> MakeTestBuffer(SampleFormat format,
ChannelLayout channel_layout,
T start,
- T end,
+ T step,
int frames) {
- const base::TimeDelta kNoTime = kNoTimestamp();
return MakeAudioBuffer<T>(format,
channel_layout,
ChannelLayoutToChannelCount(channel_layout),
kSampleRate,
start,
- end,
+ step,
frames,
- kNoTime,
- kNoTime);
+ kNoTimestamp());
}
TEST(AudioBufferQueueTest, AppendAndClear) {
@@ -96,7 +96,7 @@ TEST(AudioBufferQueueTest, IteratorCheck) {
EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
EXPECT_EQ(4, buffer.frames());
- VerifyResult(bus->channel(0), 4, 10.0f, 1.0f);
+ VerifyBus(bus.get(), 0, 4, bus->frames(), 10, 1);
buffer.Append(MakeTestBuffer<float>(
kSampleFormatF32, channel_layout, 20.0f, 1.0f, 8));
@@ -108,7 +108,7 @@ TEST(AudioBufferQueueTest, IteratorCheck) {
buffer.SeekFrames(16);
EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
EXPECT_EQ(0, buffer.frames());
- VerifyResult(bus->channel(0), 4, 34.0f, 1.0f);
+ VerifyBus(bus.get(), 0, 4, bus->frames(), 34, 1);
buffer.Append(MakeTestBuffer<float>(
kSampleFormatF32, channel_layout, 40.0f, 1.0f, 8));
@@ -118,13 +118,13 @@ TEST(AudioBufferQueueTest, IteratorCheck) {
EXPECT_EQ(16, buffer.frames());
EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
- VerifyResult(bus->channel(0), 4, 40.0f, 1.0f);
+ VerifyBus(bus.get(), 0, 4, bus->frames(), 40, 1);
// Read off the end of the buffer.
EXPECT_EQ(12, buffer.frames());
buffer.SeekFrames(8);
EXPECT_EQ(4, buffer.ReadFrames(100, 0, bus.get()));
- VerifyResult(bus->channel(0), 4, 54.0f, 1.0f);
+ VerifyBus(bus.get(), 0, 4, bus->frames(), 54, 1);
}
TEST(AudioBufferQueueTest, Seek) {
@@ -162,19 +162,17 @@ TEST(AudioBufferQueueTest, ReadF32) {
MakeTestBuffer<float>(kSampleFormatF32, channel_layout, 33.0f, 1.0f, 60));
EXPECT_EQ(76, buffer.frames());
- // Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be
- // 1, 3, 5, and ch[1] should be 2, 4, 6.
+ // Read 3 frames from the buffer.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
EXPECT_EQ(3, buffer.ReadFrames(3, 0, bus.get()));
EXPECT_EQ(73, buffer.frames());
- VerifyResult(bus->channel(0), 3, 1.0f, 2.0f);
- VerifyResult(bus->channel(1), 3, 2.0f, 2.0f);
+ VerifyBus(bus.get(), 0, 3, 6, 1, 1);
// Now read 5 frames, which will span buffers. Append the data into AudioBus.
EXPECT_EQ(5, buffer.ReadFrames(5, 3, bus.get()));
EXPECT_EQ(68, buffer.frames());
- VerifyResult(bus->channel(0), 8, 1.0f, 2.0f);
- VerifyResult(bus->channel(1), 8, 2.0f, 2.0f);
+ VerifyBus(bus.get(), 0, 6, 6, 1, 1);
+ VerifyBus(bus.get(), 6, 2, 10, 13, 1);
// Now skip into the third buffer.
buffer.SeekFrames(20);
@@ -182,30 +180,24 @@ TEST(AudioBufferQueueTest, ReadF32) {
// Now read 2 frames, which are in the third buffer.
EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
- VerifyResult(bus->channel(0), 2, 57.0f, 2.0f);
- VerifyResult(bus->channel(1), 2, 58.0f, 2.0f);
+ VerifyBus(bus.get(), 0, 2, 60, 45, 1);
}
TEST(AudioBufferQueueTest, ReadU8) {
const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
const int channels = ChannelLayoutToChannelCount(channel_layout);
+ const int frames = 4;
AudioBufferQueue buffer;
// Add 4 frames of data.
buffer.Append(
- MakeTestBuffer<uint8>(kSampleFormatU8, channel_layout, 128, 1, 4));
+ MakeTestBuffer<uint8>(kSampleFormatU8, channel_layout, 128, 1, frames));
- // Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
- // 128, 132, 136, 140, other channels similar. However, values are converted
- // from [0, 255] to [-1.0, 1.0] with a bias of 128. Thus the first buffer
- // value should be 0.0, then 1/127, 2/127, etc.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
+ // Read all 4 frames from the buffer.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
+ EXPECT_EQ(frames, buffer.ReadFrames(frames, 0, bus.get()));
EXPECT_EQ(0, buffer.frames());
- VerifyResult(bus->channel(0), 4, 0.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(1), 4, 1.0f / 127.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(2), 4, 2.0f / 127.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(3), 4, 3.0f / 127.0f, 4.0f / 127.0f);
+ VerifyBus(bus.get(), 0, frames, bus->frames(), 0, 1.0f / 127.0f);
}
TEST(AudioBufferQueueTest, ReadS16) {
@@ -220,14 +212,13 @@ TEST(AudioBufferQueueTest, ReadS16) {
MakeTestBuffer<int16>(kSampleFormatS16, channel_layout, 9, 1, 20));
EXPECT_EQ(24, buffer.frames());
- // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be
- // 1, 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12.
- // Data is converted to float from -1.0 to 1.0 based on int16 range.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
+ // Read 6 frames from the buffer.
+ const int frames = 6;
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, buffer.frames());
+ EXPECT_EQ(frames, buffer.ReadFrames(frames, 0, bus.get()));
EXPECT_EQ(18, buffer.frames());
- VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 2.0f / kint16max);
- VerifyResult(bus->channel(1), 6, 2.0f / kint16max, 2.0f / kint16max);
+ VerifyBus(bus.get(), 0, 4, 4, 1.0f / kint16max, 1.0f / kint16max);
+ VerifyBus(bus.get(), 4, 2, 20, 9.0f / kint16max, 1.0f / kint16max);
}
TEST(AudioBufferQueueTest, ReadS32) {
@@ -242,20 +233,17 @@ TEST(AudioBufferQueueTest, ReadS32) {
MakeTestBuffer<int32>(kSampleFormatS32, channel_layout, 9, 1, 20));
EXPECT_EQ(24, buffer.frames());
- // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be
- // 1, 3, 5, 7, 100, 106, and ch[1] should be 2, 4, 6, 8, 103, 109.
- // Data is converted to float from -1.0 to 1.0 based on int32 range.
+ // Read 6 frames from the buffer.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
EXPECT_EQ(18, buffer.frames());
- VerifyResult(bus->channel(0), 6, 1.0f / kint32max, 2.0f / kint32max);
- VerifyResult(bus->channel(1), 6, 2.0f / kint32max, 2.0f / kint32max);
+ VerifyBus(bus.get(), 0, 4, 4, 1.0f / kint32max, 1.0f / kint32max);
+ VerifyBus(bus.get(), 4, 2, 20, 9.0f / kint32max, 1.0f / kint32max);
// Read the next 2 frames.
EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
EXPECT_EQ(16, buffer.frames());
- VerifyResult(bus->channel(0), 2, 13.0f / kint32max, 2.0f / kint32max);
- VerifyResult(bus->channel(1), 2, 14.0f / kint32max, 2.0f / kint32max);
+ VerifyBus(bus.get(), 0, 2, 20, 11.0f / kint32max, 1.0f / kint32max);
}
TEST(AudioBufferQueueTest, ReadF32Planar) {
@@ -270,15 +258,12 @@ TEST(AudioBufferQueueTest, ReadF32Planar) {
kSampleFormatPlanarF32, channel_layout, 50.0f, 1.0f, 10));
EXPECT_EQ(14, buffer.frames());
- // Read 6 frames from the buffer. F32 is planar, so ch[0] should be
- // 1, 2, 3, 4, 50, 51, and ch[1] should be 5, 6, 7, 8, 60, 61.
+ // Read 6 frames from the buffer.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
EXPECT_EQ(8, buffer.frames());
- VerifyResult(bus->channel(0), 4, 1.0f, 1.0f);
- VerifyResult(bus->channel(0) + 4, 2, 50.0f, 1.0f);
- VerifyResult(bus->channel(1), 4, 5.0f, 1.0f);
- VerifyResult(bus->channel(1) + 4, 2, 60.0f, 1.0f);
+ VerifyBus(bus.get(), 0, 4, 4, 1, 1);
+ VerifyBus(bus.get(), 4, 2, 10, 50, 1);
}
TEST(AudioBufferQueueTest, ReadS16Planar) {
@@ -293,16 +278,12 @@ TEST(AudioBufferQueueTest, ReadS16Planar) {
kSampleFormatPlanarS16, channel_layout, 100, 5, 20));
EXPECT_EQ(24, buffer.frames());
- // Read 6 frames from the buffer. Data is planar, so ch[0] should be
- // 1, 2, 3, 4, 100, 105, and ch[1] should be 5, 6, 7, 8, 200, 205.
- // Data is converted to float from -1.0 to 1.0 based on int16 range.
+ // Read 6 frames from the buffer.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
EXPECT_EQ(18, buffer.frames());
- VerifyResult(bus->channel(0), 4, 1.0f / kint16max, 1.0f / kint16max);
- VerifyResult(bus->channel(0) + 4, 2, 100.0f / kint16max, 5.0f / kint16max);
- VerifyResult(bus->channel(1), 4, 5.0f / kint16max, 1.0f / kint16max);
- VerifyResult(bus->channel(1) + 4, 2, 200.0f / kint16max, 5.0f / kint16max);
+ VerifyBus(bus.get(), 0, 4, 4, 1.0f / kint16max, 1.0f / kint16max);
+ VerifyBus(bus.get(), 4, 2, 20, 5.0f / kint16max, 1.0f / kint16max);
}
TEST(AudioBufferQueueTest, ReadManyChannels) {
@@ -319,14 +300,13 @@ TEST(AudioBufferQueueTest, ReadManyChannels) {
kSampleFormatF32, channel_layout, 16.0f * channels, 1.0f, 60));
EXPECT_EQ(76, buffer.frames());
- // Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be
- // 1, 17, 33, and ch[1] should be 2, 18, 34. Just check a few channels.
+ // Read 3 frames from the buffer.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
EXPECT_EQ(30, buffer.ReadFrames(30, 0, bus.get()));
EXPECT_EQ(46, buffer.frames());
- for (int i = 0; i < channels; ++i) {
- VerifyResult(bus->channel(i), 30, static_cast<float>(i), 8.0f);
- }
+ VerifyBus(bus.get(), 0, 6, 6, 0, 1);
+ VerifyBus(bus.get(), 6, 10, 10, 6 * channels, 1);
+ VerifyBus(bus.get(), 16, 14, 60, 16 * channels, 1);
}
TEST(AudioBufferQueueTest, Peek) {
@@ -335,43 +315,32 @@ TEST(AudioBufferQueueTest, Peek) {
AudioBufferQueue buffer;
// Add 60 frames of data.
- buffer.Append(
- MakeTestBuffer<float>(kSampleFormatF32, channel_layout, 0.0f, 1.0f, 60));
- EXPECT_EQ(60, buffer.frames());
+ const int frames = 60;
+ buffer.Append(MakeTestBuffer<float>(
+ kSampleFormatF32, channel_layout, 0.0f, 1.0f, frames));
+ EXPECT_EQ(frames, buffer.frames());
// Peek at the first 30 frames.
- scoped_ptr<AudioBus> bus1 = AudioBus::Create(channels, 100);
- EXPECT_EQ(60, buffer.frames());
- EXPECT_EQ(60, buffer.PeekFrames(100, 0, 0, bus1.get()));
+ scoped_ptr<AudioBus> bus1 = AudioBus::Create(channels, frames);
+ EXPECT_EQ(frames, buffer.frames());
+ EXPECT_EQ(frames, buffer.PeekFrames(60, 0, 0, bus1.get()));
EXPECT_EQ(30, buffer.PeekFrames(30, 0, 0, bus1.get()));
- EXPECT_EQ(60, buffer.frames());
+ EXPECT_EQ(frames, buffer.frames());
+ VerifyBus(bus1.get(), 0, 30, bus1->frames(), 0, 1);
// Now read the next 30 frames (which should be the same as those peeked at).
- scoped_ptr<AudioBus> bus2 = AudioBus::Create(channels, 100);
+ scoped_ptr<AudioBus> bus2 = AudioBus::Create(channels, frames);
EXPECT_EQ(30, buffer.ReadFrames(30, 0, bus2.get()));
- for (int i = 0; i < channels; ++i) {
- VerifyResult(bus1->channel(i),
- 30,
- static_cast<float>(i),
- static_cast<float>(channels));
- VerifyResult(bus2->channel(i),
- 30,
- static_cast<float>(i),
- static_cast<float>(channels));
- }
+ VerifyBus(bus2.get(), 0, 30, bus2->frames(), 0, 1);
// Peek 10 frames forward
+ bus1->Zero();
EXPECT_EQ(5, buffer.PeekFrames(5, 10, 0, bus1.get()));
- for (int i = 0; i < channels; ++i) {
- VerifyResult(bus1->channel(i),
- 5,
- static_cast<float>(i + 40 * channels),
- static_cast<float>(channels));
- }
+ VerifyBus(bus1.get(), 0, 5, bus1->frames(), 40, 1);
// Peek to the end of the buffer.
EXPECT_EQ(30, buffer.frames());
- EXPECT_EQ(30, buffer.PeekFrames(100, 0, 0, bus1.get()));
+ EXPECT_EQ(30, buffer.PeekFrames(60, 0, 0, bus1.get()));
EXPECT_EQ(30, buffer.PeekFrames(30, 0, 0, bus1.get()));
}
@@ -380,36 +349,43 @@ TEST(AudioBufferQueueTest, Time) {
const int channels = ChannelLayoutToChannelCount(channel_layout);
const base::TimeDelta start_time1;
const base::TimeDelta start_time2 = base::TimeDelta::FromSeconds(30);
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(10);
AudioBufferQueue buffer;
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ scoped_refptr<AudioBuffer> audio_buffer =
+ MakeAudioBuffer<int16>(kSampleFormatS16,
+ channel_layout,
+ channels,
+ kSampleRate,
+ 1,
+ 1,
+ 10,
+ start_time1);
+
// Add two buffers (second one added later):
// first: start=0s, duration=10s
// second: start=30s, duration=10s
- buffer.Append(MakeAudioBuffer<int16>(kSampleFormatS16,
- channel_layout,
- channels,
- kSampleRate,
- 1,
- 1,
- 10,
- start_time1,
- duration));
+ buffer.Append(audio_buffer);
EXPECT_EQ(10, buffer.frames());
// Check starting time.
EXPECT_EQ(start_time1, buffer.current_time());
// Read 2 frames, should be 2s in (since duration is 1s per sample).
- EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
- EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(2),
- buffer.current_time());
+ int frames_read = 2;
+ EXPECT_EQ(frames_read, buffer.ReadFrames(frames_read, 0, bus.get()));
+ EXPECT_EQ(
+ start_time1 +
+ frames_read * audio_buffer->duration() / audio_buffer->frame_count(),
+ buffer.current_time());
// Skip 2 frames.
buffer.SeekFrames(2);
- EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(4),
- buffer.current_time());
+ frames_read += 2;
+ EXPECT_EQ(
+ start_time1 +
+ frames_read * audio_buffer->duration() / audio_buffer->frame_count(),
+ buffer.current_time());
// Add second buffer for more data.
buffer.Append(MakeAudioBuffer<int16>(kSampleFormatS16,
@@ -419,28 +395,32 @@ TEST(AudioBufferQueueTest, Time) {
1,
1,
10,
- start_time2,
- duration));
+ start_time2));
EXPECT_EQ(16, buffer.frames());
// Read until almost the end of buffer1.
+ frames_read += 5;
EXPECT_EQ(5, buffer.ReadFrames(5, 0, bus.get()));
- EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(9),
- buffer.current_time());
+ EXPECT_EQ(
+ start_time1 +
+ frames_read * audio_buffer->duration() / audio_buffer->frame_count(),
+ buffer.current_time());
// Read 1 value, so time moved to buffer2.
EXPECT_EQ(1, buffer.ReadFrames(1, 0, bus.get()));
EXPECT_EQ(start_time2, buffer.current_time());
// Read all 10 frames in buffer2, timestamp should be last time from buffer2.
+ frames_read = 10;
EXPECT_EQ(10, buffer.ReadFrames(10, 0, bus.get()));
- EXPECT_EQ(start_time2 + base::TimeDelta::FromSeconds(10),
- buffer.current_time());
+ const base::TimeDelta expected_current_time =
+ start_time2 +
+ frames_read * audio_buffer->duration() / audio_buffer->frame_count();
+ EXPECT_EQ(expected_current_time, buffer.current_time());
// Try to read more frames (which don't exist), timestamp should remain.
EXPECT_EQ(0, buffer.ReadFrames(5, 0, bus.get()));
- EXPECT_EQ(start_time2 + base::TimeDelta::FromSeconds(10),
- buffer.current_time());
+ EXPECT_EQ(expected_current_time, buffer.current_time());
}
TEST(AudioBufferQueueTest, NoTime) {
diff --git a/media/base/audio_buffer_unittest.cc b/media/base/audio_buffer_unittest.cc
index c40c076..55ff4ed 100644
--- a/media/base/audio_buffer_unittest.cc
+++ b/media/base/audio_buffer_unittest.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/test_helpers.h"
@@ -11,39 +9,44 @@
namespace media {
-const static int kSampleRate = 44100;
-
-static void VerifyResult(float* channel_data,
- int frames,
- float start,
- float increment) {
- for (int i = 0; i < frames; ++i) {
- SCOPED_TRACE(base::StringPrintf(
- "i=%d/%d start=%f, increment=%f", i, frames, start, increment));
- ASSERT_EQ(channel_data[i], start);
- start += increment;
+static const int kSampleRate = 48000;
+
+static void VerifyBus(AudioBus* bus, int frames, float start, float increment) {
+ for (int ch = 0; ch < bus->channels(); ++ch) {
+ const float v = start + ch * bus->frames() * increment;
+ for (int i = 0; i < frames; ++i) {
+ ASSERT_FLOAT_EQ(v + i * increment, bus->channel(ch)[i]) << "i=" << i
+ << ", ch=" << ch;
+ }
}
}
TEST(AudioBufferTest, CopyFrom) {
- const ChannelLayout channel_layout = CHANNEL_LAYOUT_MONO;
- const int frames = 8;
- const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- scoped_refptr<AudioBuffer> buffer =
+ const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_MONO;
+ scoped_refptr<AudioBuffer> original_buffer =
MakeAudioBuffer<uint8>(kSampleFormatU8,
- channel_layout,
- ChannelLayoutToChannelCount(channel_layout),
+ kChannelLayout,
+ ChannelLayoutToChannelCount(kChannelLayout),
kSampleRate,
1,
1,
- frames,
- start_time,
- duration);
- EXPECT_EQ(frames, buffer->frame_count());
- EXPECT_EQ(buffer->timestamp(), start_time);
- EXPECT_EQ(buffer->duration().InSeconds(), frames);
- EXPECT_FALSE(buffer->end_of_stream());
+ kSampleRate / 100,
+ base::TimeDelta());
+ scoped_refptr<AudioBuffer> new_buffer =
+ AudioBuffer::CopyFrom(kSampleFormatU8,
+ original_buffer->channel_layout(),
+ original_buffer->channel_count(),
+ original_buffer->sample_rate(),
+ original_buffer->frame_count(),
+ &original_buffer->channel_data()[0],
+ original_buffer->timestamp());
+ EXPECT_EQ(original_buffer->frame_count(), new_buffer->frame_count());
+ EXPECT_EQ(original_buffer->timestamp(), new_buffer->timestamp());
+ EXPECT_EQ(original_buffer->duration(), new_buffer->duration());
+ EXPECT_EQ(original_buffer->sample_rate(), new_buffer->sample_rate());
+ EXPECT_EQ(original_buffer->channel_count(), new_buffer->channel_count());
+ EXPECT_EQ(original_buffer->channel_layout(), new_buffer->channel_layout());
+ EXPECT_FALSE(original_buffer->end_of_stream());
}
TEST(AudioBufferTest, CreateEOSBuffer) {
@@ -55,8 +58,7 @@ TEST(AudioBufferTest, FrameSize) {
const uint8 kTestData[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31 };
- const base::TimeDelta kTimestampA = base::TimeDelta::FromMicroseconds(1337);
- const base::TimeDelta kTimestampB = base::TimeDelta::FromMicroseconds(1234);
+ const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
const uint8* const data[] = { kTestData };
scoped_refptr<AudioBuffer> buffer =
@@ -66,8 +68,7 @@ TEST(AudioBufferTest, FrameSize) {
kSampleRate,
16,
data,
- kTimestampA,
- kTimestampB);
+ kTimestamp);
EXPECT_EQ(16, buffer->frame_count()); // 2 channels of 8-bit data
buffer = AudioBuffer::CopyFrom(kSampleFormatF32,
@@ -76,17 +77,15 @@ TEST(AudioBufferTest, FrameSize) {
kSampleRate,
2,
data,
- kTimestampA,
- kTimestampB);
+ kTimestamp);
EXPECT_EQ(2, buffer->frame_count()); // now 4 channels of 32-bit data
}
TEST(AudioBufferTest, ReadU8) {
const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
const int channels = ChannelLayoutToChannelCount(channel_layout);
- const int frames = 4;
+ const int frames = 10;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<uint8>(kSampleFormatU8,
channel_layout,
channels,
@@ -94,19 +93,16 @@ TEST(AudioBufferTest, ReadU8) {
128,
1,
frames,
- start_time,
- duration);
-
- // Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
- // 128, 132, 136, 140, other channels similar. However, values are converted
- // from [0, 255] to [-1.0, 1.0] with a bias of 128. Thus the first buffer
- // value should be 0.0, then 1/127, 2/127, etc.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ start_time);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
buffer->ReadFrames(frames, 0, 0, bus.get());
- VerifyResult(bus->channel(0), frames, 0.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(1), frames, 1.0f / 127.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(2), frames, 2.0f / 127.0f, 4.0f / 127.0f);
- VerifyResult(bus->channel(3), frames, 3.0f / 127.0f, 4.0f / 127.0f);
+ VerifyBus(bus.get(), frames, 0, 1.0f / 127.0f);
+
+ // Now read the same data one frame at a time.
+ bus->Zero();
+ for (int i = 0; i < frames; ++i)
+ buffer->ReadFrames(1, i, i, bus.get());
+ VerifyBus(bus.get(), frames, 0, 1.0f / 127.0f);
}
TEST(AudioBufferTest, ReadS16) {
@@ -114,7 +110,6 @@ TEST(AudioBufferTest, ReadS16) {
const int channels = ChannelLayoutToChannelCount(channel_layout);
const int frames = 10;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<int16>(kSampleFormatS16,
channel_layout,
channels,
@@ -122,32 +117,23 @@ TEST(AudioBufferTest, ReadS16) {
1,
1,
frames,
- start_time,
- duration);
-
- // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
- // 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. Data is converted
- // to float from -1.0 to 1.0 based on int16 range.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- buffer->ReadFrames(6, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 2.0f / kint16max);
- VerifyResult(bus->channel(1), 6, 2.0f / kint16max, 2.0f / kint16max);
+ start_time);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
// Now read the same data one frame at a time.
- bus = AudioBus::Create(channels, 100);
- for (int i = 0; i < frames; ++i) {
+ bus->Zero();
+ for (int i = 0; i < frames; ++i)
buffer->ReadFrames(1, i, i, bus.get());
- }
- VerifyResult(bus->channel(0), frames, 1.0f / kint16max, 2.0f / kint16max);
- VerifyResult(bus->channel(1), frames, 2.0f / kint16max, 2.0f / kint16max);
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
}
TEST(AudioBufferTest, ReadS32) {
const ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
const int channels = ChannelLayoutToChannelCount(channel_layout);
- const int frames = 6;
+ const int frames = 20;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<int32>(kSampleFormatS32,
channel_layout,
channels,
@@ -155,22 +141,15 @@ TEST(AudioBufferTest, ReadS32) {
1,
1,
frames,
- start_time,
- duration);
-
- // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
- // 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. Data is converted
- // to float from -1.0 to 1.0 based on int32 range.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ start_time);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
buffer->ReadFrames(frames, 0, 0, bus.get());
- VerifyResult(bus->channel(0), frames, 1.0f / kint32max, 2.0f / kint32max);
- VerifyResult(bus->channel(1), frames, 2.0f / kint32max, 2.0f / kint32max);
-
- // Now read 2 frames starting at frame offset 3. ch[0] should be 7, 9, and
- // ch[1] should be 8, 10.
- buffer->ReadFrames(2, 3, 0, bus.get());
- VerifyResult(bus->channel(0), 2, 7.0f / kint32max, 2.0f / kint32max);
- VerifyResult(bus->channel(1), 2, 8.0f / kint32max, 2.0f / kint32max);
+ VerifyBus(bus.get(), frames, 1.0f / kint32max, 1.0f / kint32max);
+
+ // Read second 10 frames.
+ bus->Zero();
+ buffer->ReadFrames(10, 10, 0, bus.get());
+ VerifyBus(bus.get(), 10, 11.0f / kint32max, 1.0f / kint32max);
}
TEST(AudioBufferTest, ReadF32) {
@@ -178,7 +157,6 @@ TEST(AudioBufferTest, ReadF32) {
const int channels = ChannelLayoutToChannelCount(channel_layout);
const int frames = 20;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<float>(kSampleFormatF32,
channel_layout,
channels,
@@ -186,21 +164,15 @@ TEST(AudioBufferTest, ReadF32) {
1.0f,
1.0f,
frames,
- start_time,
- duration);
-
- // Read first 10 frames from the buffer. F32 is interleaved, so ch[0] should
- // be 1, 3, 5, ... and ch[1] should be 2, 4, 6, ...
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ start_time);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
buffer->ReadFrames(10, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 10, 1.0f, 2.0f);
- VerifyResult(bus->channel(1), 10, 2.0f, 2.0f);
+ VerifyBus(bus.get(), 10, 1, 1);
// Read second 10 frames.
- bus = AudioBus::Create(channels, 100);
+ bus->Zero();
buffer->ReadFrames(10, 10, 0, bus.get());
- VerifyResult(bus->channel(0), 10, 21.0f, 2.0f);
- VerifyResult(bus->channel(1), 10, 22.0f, 2.0f);
+ VerifyBus(bus.get(), 10, 11, 1);
}
TEST(AudioBufferTest, ReadS16Planar) {
@@ -208,7 +180,6 @@ TEST(AudioBufferTest, ReadS16Planar) {
const int channels = ChannelLayoutToChannelCount(channel_layout);
const int frames = 20;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer =
MakeAudioBuffer<int16>(kSampleFormatPlanarS16,
channel_layout,
@@ -217,32 +188,25 @@ TEST(AudioBufferTest, ReadS16Planar) {
1,
1,
frames,
- start_time,
- duration);
-
- // Read 6 frames from the buffer. Data is planar, so ch[0] should be 1, 2, 3,
- // 4, 5, 6, and ch[1] should be 21, 22, 23, 24, 25, 26. Data is converted to
- // float from -1.0 to 1.0 based on int16 range.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- buffer->ReadFrames(6, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 1.0f / kint16max);
- VerifyResult(bus->channel(1), 6, 21.0f / kint16max, 1.0f / kint16max);
+ start_time);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
+ buffer->ReadFrames(10, 0, 0, bus.get());
+ VerifyBus(bus.get(), 10, 1.0f / kint16max, 1.0f / kint16max);
// Read all the frames backwards, one by one. ch[0] should be 20, 19, ...
- bus = AudioBus::Create(channels, 100);
- for (int i = 0; i < frames; ++i) {
- buffer->ReadFrames(1, frames - i - 1, i, bus.get());
- }
- VerifyResult(bus->channel(0), frames, 20.0f / kint16max, -1.0f / kint16max);
- VerifyResult(bus->channel(1), frames, 40.0f / kint16max, -1.0f / kint16max);
+ bus->Zero();
+ for (int i = frames - 1; i >= 0; --i)
+ buffer->ReadFrames(1, i, i, bus.get());
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
// Read 0 frames with different offsets. Existing data in AudioBus should be
// unchanged.
buffer->ReadFrames(0, 0, 0, bus.get());
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
buffer->ReadFrames(0, 0, 10, bus.get());
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
buffer->ReadFrames(0, 10, 0, bus.get());
- VerifyResult(bus->channel(0), frames, 20.0f / kint16max, -1.0f / kint16max);
- VerifyResult(bus->channel(1), frames, 40.0f / kint16max, -1.0f / kint16max);
+ VerifyBus(bus.get(), frames, 1.0f / kint16max, 1.0f / kint16max);
}
TEST(AudioBufferTest, ReadF32Planar) {
@@ -250,7 +214,6 @@ TEST(AudioBufferTest, ReadF32Planar) {
const int channels = ChannelLayoutToChannelCount(channel_layout);
const int frames = 100;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer =
MakeAudioBuffer<float>(kSampleFormatPlanarF32,
channel_layout,
@@ -259,103 +222,94 @@ TEST(AudioBufferTest, ReadF32Planar) {
1.0f,
1.0f,
frames,
- start_time,
- duration);
+ start_time);
// Read all 100 frames from the buffer. F32 is planar, so ch[0] should be 1,
// 2, 3, 4, ..., ch[1] should be 101, 102, 103, ..., and so on for all 4
// channels.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
buffer->ReadFrames(frames, 0, 0, bus.get());
- VerifyResult(bus->channel(0), frames, 1.0f, 1.0f);
- VerifyResult(bus->channel(1), frames, 101.0f, 1.0f);
- VerifyResult(bus->channel(2), frames, 201.0f, 1.0f);
- VerifyResult(bus->channel(3), frames, 301.0f, 1.0f);
+ VerifyBus(bus.get(), frames, 1, 1);
// Now read 20 frames from the middle of the buffer.
- bus = AudioBus::Create(channels, 100);
+ bus->Zero();
buffer->ReadFrames(20, 50, 0, bus.get());
- VerifyResult(bus->channel(0), 20, 51.0f, 1.0f);
- VerifyResult(bus->channel(1), 20, 151.0f, 1.0f);
- VerifyResult(bus->channel(2), 20, 251.0f, 1.0f);
- VerifyResult(bus->channel(3), 20, 351.0f, 1.0f);
+ VerifyBus(bus.get(), 20, 51, 1);
}
TEST(AudioBufferTest, EmptyBuffer) {
const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
const int channels = ChannelLayoutToChannelCount(channel_layout);
- const int frames = 100;
+ const int frames = kSampleRate / 100;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateEmptyBuffer(
- channel_layout, channels, kSampleRate, frames, start_time, duration);
+ channel_layout, channels, kSampleRate, frames, start_time);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(start_time, buffer->timestamp());
- EXPECT_EQ(frames, buffer->duration().InSeconds());
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(10), buffer->duration());
EXPECT_FALSE(buffer->end_of_stream());
// Read all 100 frames from the buffer. All data should be 0.
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
buffer->ReadFrames(frames, 0, 0, bus.get());
- VerifyResult(bus->channel(0), frames, 0.0f, 0.0f);
- VerifyResult(bus->channel(1), frames, 0.0f, 0.0f);
- VerifyResult(bus->channel(2), frames, 0.0f, 0.0f);
- VerifyResult(bus->channel(3), frames, 0.0f, 0.0f);
+ VerifyBus(bus.get(), frames, 0, 0);
}
TEST(AudioBufferTest, Trim) {
const ChannelLayout channel_layout = CHANNEL_LAYOUT_4_0;
const int channels = ChannelLayoutToChannelCount(channel_layout);
- const int frames = 100;
+ const int frames = kSampleRate / 10;
const base::TimeDelta start_time;
- const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ const base::TimeDelta duration = base::TimeDelta::FromMilliseconds(100);
scoped_refptr<AudioBuffer> buffer =
MakeAudioBuffer<float>(kSampleFormatPlanarF32,
channel_layout,
channels,
kSampleRate,
- 1.0f,
+ 0.0f,
1.0f,
frames,
- start_time,
- duration);
+ start_time);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(start_time, buffer->timestamp());
- EXPECT_EQ(frames, buffer->duration().InSeconds());
-
- scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- buffer->ReadFrames(20, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 20, 1.0f, 1.0f);
-
- // Trim off 10 frames from the start.
- buffer->TrimStart(10);
- EXPECT_EQ(buffer->frame_count(), frames - 10);
- EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(10));
- EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(90));
- buffer->ReadFrames(20, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 20, 11.0f, 1.0f);
-
- // Trim off 10 frames from the end.
- buffer->TrimEnd(10);
- EXPECT_EQ(buffer->frame_count(), frames - 20);
- EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(10));
- EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(80));
- buffer->ReadFrames(20, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 20, 11.0f, 1.0f);
-
- // Trim off 50 more from the start.
- buffer->TrimStart(50);
- EXPECT_EQ(buffer->frame_count(), frames - 70);
- EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(60));
- EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(30));
- buffer->ReadFrames(10, 0, 0, bus.get());
- VerifyResult(bus->channel(0), 10, 61.0f, 1.0f);
-
- // Trim off the last 30 frames.
- buffer->TrimEnd(30);
- EXPECT_EQ(buffer->frame_count(), 0);
- EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(60));
- EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(0));
+ EXPECT_EQ(duration, buffer->duration());
+
+ const int ten_ms_of_frames = kSampleRate / 100;
+ const base::TimeDelta ten_ms = base::TimeDelta::FromMilliseconds(10);
+
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, frames);
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), buffer->frame_count(), 0.0f, 1.0f);
+
+ // Trim off 10ms of frames from the start.
+ buffer->TrimStart(ten_ms_of_frames);
+ EXPECT_EQ(start_time + ten_ms, buffer->timestamp());
+ EXPECT_EQ(frames - ten_ms_of_frames, buffer->frame_count());
+ EXPECT_EQ(duration - ten_ms, buffer->duration());
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), buffer->frame_count(), ten_ms_of_frames, 1.0f);
+
+ // Trim off 10ms of frames from the end.
+ buffer->TrimEnd(ten_ms_of_frames);
+ EXPECT_EQ(start_time + ten_ms, buffer->timestamp());
+ EXPECT_EQ(frames - 2 * ten_ms_of_frames, buffer->frame_count());
+ EXPECT_EQ(duration - 2 * ten_ms, buffer->duration());
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), buffer->frame_count(), ten_ms_of_frames, 1.0f);
+
+ // Trim off 40ms more from the start.
+ buffer->TrimStart(4 * ten_ms_of_frames);
+ EXPECT_EQ(start_time + 5 * ten_ms, buffer->timestamp());
+ EXPECT_EQ(frames - 6 * ten_ms_of_frames, buffer->frame_count());
+ EXPECT_EQ(duration - 6 * ten_ms, buffer->duration());
+ buffer->ReadFrames(buffer->frame_count(), 0, 0, bus.get());
+ VerifyBus(bus.get(), buffer->frame_count(), 5 * ten_ms_of_frames, 1.0f);
+
+ // Trim off the final 40ms from the end.
+ buffer->TrimEnd(4 * ten_ms_of_frames);
+ EXPECT_EQ(0, buffer->frame_count());
+ EXPECT_EQ(start_time + 5 * ten_ms, buffer->timestamp());
+ EXPECT_EQ(base::TimeDelta(), buffer->duration());
}
} // namespace media
diff --git a/media/base/audio_discard_helper.cc b/media/base/audio_discard_helper.cc
index 3088130..d868382 100644
--- a/media/base/audio_discard_helper.cc
+++ b/media/base/audio_discard_helper.cc
@@ -103,10 +103,8 @@ bool AudioDiscardHelper::ProcessBuffers(
DCHECK(encoded_buffer->discard_padding() == base::TimeDelta());
}
- // Assign timestamp and duration to the buffer.
+ // Assign timestamp to the buffer.
decoded_buffer->set_timestamp(timestamp_helper_.GetTimestamp());
- decoded_buffer->set_duration(
- timestamp_helper_.GetFrameDuration(decoded_buffer->frame_count()));
timestamp_helper_.AddFrames(decoded_buffer->frame_count());
return true;
}
diff --git a/media/base/audio_discard_helper_unittest.cc b/media/base/audio_discard_helper_unittest.cc
index 7788e7f..e3f21c0 100644
--- a/media/base/audio_discard_helper_unittest.cc
+++ b/media/base/audio_discard_helper_unittest.cc
@@ -33,7 +33,6 @@ static scoped_refptr<AudioBuffer> CreateDecodedBuffer(int frames) {
0.0f,
kDataStep,
frames,
- kNoTimestamp(),
kNoTimestamp());
}
diff --git a/media/base/audio_splicer.cc b/media/base/audio_splicer.cc
index b83765e..9fae417 100644
--- a/media/base/audio_splicer.cc
+++ b/media/base/audio_splicer.cc
@@ -35,20 +35,6 @@ static void AccurateTrimStart(int frames_to_trim,
const AudioTimestampHelper& timestamp_helper) {
buffer->TrimStart(frames_to_trim);
buffer->set_timestamp(timestamp_helper.GetTimestamp());
- buffer->set_duration(
- timestamp_helper.GetFrameDuration(buffer->frame_count()));
-}
-
-// AudioBuffer::TrimEnd() is not as accurate as the timestamp helper, so
-// manually adjust the duration after trimming.
-static void AccurateTrimEnd(int frames_to_trim,
- const scoped_refptr<AudioBuffer> buffer,
- const AudioTimestampHelper& timestamp_helper) {
- DCHECK_LT(std::abs(timestamp_helper.GetFramesToTarget(buffer->timestamp())),
- kMinGapSize);
- buffer->TrimEnd(frames_to_trim);
- buffer->set_duration(
- timestamp_helper.GetFrameDuration(buffer->frame_count()));
}
// Returns an AudioBus whose frame buffer is backed by the provided AudioBuffer.
@@ -177,13 +163,12 @@ bool AudioStreamSanitizer::AddInput(const scoped_refptr<AudioBuffer>& input) {
// Create a buffer with enough silence samples to fill the gap and
// add it to the output buffer.
- scoped_refptr<AudioBuffer> gap = AudioBuffer::CreateEmptyBuffer(
- input->channel_layout(),
- input->channel_count(),
- input->sample_rate(),
- frames_to_fill,
- expected_timestamp,
- output_timestamp_helper_.GetFrameDuration(frames_to_fill));
+ scoped_refptr<AudioBuffer> gap =
+ AudioBuffer::CreateEmptyBuffer(input->channel_layout(),
+ input->channel_count(),
+ input->sample_rate(),
+ frames_to_fill,
+ expected_timestamp);
AddOutputBuffer(gap);
// Add the input buffer now that the gap has been filled.
@@ -443,9 +428,7 @@ scoped_ptr<AudioBus> AudioSplicer::ExtractCrossfadeFromPreSplice(
// If only part of the buffer was consumed, trim it appropriately and stick
// it into the output queue.
if (frames_before_splice) {
- AccurateTrimEnd(preroll->frame_count() - frames_before_splice,
- preroll,
- output_ts_helper);
+ preroll->TrimEnd(preroll->frame_count() - frames_before_splice);
CHECK(output_sanitizer_->AddInput(preroll));
frames_before_splice = 0;
}
@@ -466,8 +449,6 @@ void AudioSplicer::CrossfadePostSplice(
const AudioTimestampHelper& output_ts_helper =
output_sanitizer_->timestamp_helper();
crossfade_buffer->set_timestamp(output_ts_helper.GetTimestamp());
- crossfade_buffer->set_duration(
- output_ts_helper.GetFrameDuration(pre_splice_bus->frames()));
// AudioBuffer::ReadFrames() only allows output into an AudioBus, so wrap
// our AudioBuffer in one so we can avoid extra data copies.
diff --git a/media/base/audio_splicer_unittest.cc b/media/base/audio_splicer_unittest.cc
index 71e1728..2e46b9f 100644
--- a/media/base/audio_splicer_unittest.cc
+++ b/media/base/audio_splicer_unittest.cc
@@ -35,16 +35,15 @@ class AudioSplicerTest : public ::testing::Test {
}
scoped_refptr<AudioBuffer> GetNextInputBuffer(float value, int frame_size) {
- scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<float>(
- kSampleFormat,
- kChannelLayout,
- kChannels,
- kDefaultSampleRate,
- value,
- 0.0f,
- frame_size,
- input_timestamp_helper_.GetTimestamp(),
- input_timestamp_helper_.GetFrameDuration(frame_size));
+ scoped_refptr<AudioBuffer> buffer =
+ MakeAudioBuffer<float>(kSampleFormat,
+ kChannelLayout,
+ kChannels,
+ kDefaultSampleRate,
+ value,
+ 0.0f,
+ frame_size,
+ input_timestamp_helper_.GetTimestamp());
input_timestamp_helper_.AddFrames(frame_size);
return buffer;
}
@@ -139,8 +138,7 @@ class AudioSplicerTest : public ::testing::Test {
input->sample_rate(),
input->frame_count(),
&input->channel_data()[0],
- input->timestamp(),
- input->duration());
+ input->timestamp());
return splicer_.AddInput(buffer_copy);
}
@@ -248,7 +246,8 @@ TEST_F(AudioSplicerTest, GapInsertion) {
base::TimeDelta gap_duration = input_2->timestamp() - gap_timestamp;
EXPECT_GT(gap_duration, base::TimeDelta());
EXPECT_EQ(gap_timestamp, output_2->timestamp());
- EXPECT_EQ(gap_duration, output_2->duration());
+ EXPECT_NEAR(
+ gap_duration.InMicroseconds(), output_2->duration().InMicroseconds(), 1);
EXPECT_EQ(kGapSize, output_2->frame_count());
EXPECT_TRUE(VerifyData(output_2, 0.0f));
@@ -444,7 +443,7 @@ TEST_F(AudioSplicerTest, PartialOverlapCrossfade) {
VerifyPreSpliceOutput(overlapped_buffer,
overlapping_buffer,
221,
- base::TimeDelta::FromMicroseconds(5012));
+ base::TimeDelta::FromMicroseconds(5011));
// Due to rounding the crossfade size may vary by up to a frame.
const int kExpectedCrossfadeSize = 220;
@@ -619,11 +618,13 @@ TEST_F(AudioSplicerTest, IncorrectlyMarkedSplice) {
GetNextInputBuffer(1.0f, kBufferSize);
// Fuzz the duration slightly so that the buffer overlaps the splice timestamp
// by a microsecond, which is not enough to crossfade.
- first_buffer->set_duration(first_buffer->duration() +
- base::TimeDelta::FromMicroseconds(1));
- splicer_.SetSpliceTimestamp(input_timestamp_helper_.GetTimestamp());
+ const base::TimeDelta kSpliceTimestamp =
+ input_timestamp_helper_.GetTimestamp() -
+ base::TimeDelta::FromMicroseconds(1);
+ splicer_.SetSpliceTimestamp(kSpliceTimestamp);
scoped_refptr<AudioBuffer> second_buffer =
GetNextInputBuffer(0.0f, kBufferSize);
+ second_buffer->set_timestamp(kSpliceTimestamp);
// The splicer should be internally queuing input since |first_buffer| is part
// of the supposed splice.
diff --git a/media/base/test_helpers.cc b/media/base/test_helpers.cc
index 98d4971..929b2f3 100644
--- a/media/base/test_helpers.cc
+++ b/media/base/test_helpers.cc
@@ -151,29 +151,25 @@ gfx::Size TestVideoConfig::LargeCodedSize() {
template <class T>
scoped_refptr<AudioBuffer> MakeAudioBuffer(SampleFormat format,
ChannelLayout channel_layout,
- int channel_count,
+ size_t channel_count,
int sample_rate,
T start,
T increment,
- int frames,
- base::TimeDelta timestamp,
- base::TimeDelta duration) {
- int channels = ChannelLayoutToChannelCount(channel_layout);
- scoped_refptr<AudioBuffer> output = AudioBuffer::CreateBuffer(
- format, channel_layout, channel_count, sample_rate, frames);
+ size_t frames,
+ base::TimeDelta timestamp) {
+ const size_t channels = ChannelLayoutToChannelCount(channel_layout);
+ scoped_refptr<AudioBuffer> output =
+ AudioBuffer::CreateBuffer(format,
+ channel_layout,
+ static_cast<int>(channel_count),
+ sample_rate,
+ static_cast<int>(frames));
output->set_timestamp(timestamp);
- output->set_duration(duration);
- // Create a block of memory with values:
- // start
- // start + increment
- // start + 2 * increment, ...
- // For interleaved data, raw data will be:
- // start
- // start + channels * increment
- // start + 2 * channels * increment, ...
- //
- // For planar data, values in channel 0 will be:
+ const bool is_planar =
+ format == kSampleFormatPlanarS16 || format == kSampleFormatPlanarF32;
+
+ // Values in channel 0 will be:
// start
// start + increment
// start + 2 * increment, ...
@@ -181,13 +177,13 @@ scoped_refptr<AudioBuffer> MakeAudioBuffer(SampleFormat format,
// start + frames * increment
// start + (frames + 1) * increment
// start + (frames + 2) * increment, ...
- const size_t output_size =
- output->channel_data().size() == 1 ? frames * channels : frames;
- for (size_t ch = 0; ch < output->channel_data().size(); ++ch) {
- T* buffer = reinterpret_cast<T*>(output->channel_data()[ch]);
- const T v = static_cast<T>(start + ch * output_size * increment);
- for (size_t i = 0; i < output_size; ++i) {
- buffer[i] = static_cast<T>(v + i * increment);
+ for (size_t ch = 0; ch < channels; ++ch) {
+ T* buffer =
+ reinterpret_cast<T*>(output->channel_data()[is_planar ? ch : 0]);
+ const T v = static_cast<T>(start + ch * frames * increment);
+ for (size_t i = 0; i < frames; ++i) {
+ buffer[is_planar ? i : ch + i * channels] =
+ static_cast<T>(v + i * increment);
}
}
return output;
@@ -199,13 +195,12 @@ scoped_refptr<AudioBuffer> MakeAudioBuffer(SampleFormat format,
template scoped_refptr<AudioBuffer> MakeAudioBuffer<type>( \
SampleFormat format, \
ChannelLayout channel_layout, \
- int channel_count, \
+ size_t channel_count, \
int sample_rate, \
type start, \
type increment, \
- int frames, \
- base::TimeDelta start_time, \
- base::TimeDelta duration)
+ size_t frames, \
+ base::TimeDelta start_time)
DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(uint8);
DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(int16);
DEFINE_MAKE_AUDIO_BUFFER_INSTANCE(int32);
diff --git a/media/base/test_helpers.h b/media/base/test_helpers.h
index f342af4..8dc3895 100644
--- a/media/base/test_helpers.h
+++ b/media/base/test_helpers.h
@@ -86,41 +86,32 @@ class TestVideoConfig {
};
// Create an AudioBuffer containing |frames| frames of data, where each sample
-// is of type T.
+// is of type T. |start| and |increment| are used to specify the values for the
+// samples, which are created in channel order. The value for a given frame and
+// channel is determined by:
//
-// For interleaved formats, each frame will have the data from |channels|
-// channels interleaved. |start| and |increment| are used to specify the values
-// for the samples. Since this is interleaved data, channel 0 data will be:
-// |start|
-// |start| + |channels| * |increment|
-// |start| + 2 * |channels| * |increment|, and so on.
-// Data for subsequent channels is similar. No check is done that |format|
-// requires data to be of type T, but it is verified that |format| is an
-// interleaved format.
+// |start| + |channel| * |frames| * |increment| + index * |increment|
//
-// For planar formats, there will be a block for each of |channel| channels.
-// |start| and |increment| are used to specify the values for the samples, which
-// are created in channel order. Since this is planar data, channel 0 data will
-// be:
-// |start|
-// |start| + |increment|
-// |start| + 2 * |increment|, and so on.
-// Data for channel 1 will follow where channel 0 ends. Subsequent channels are
-// similar. No check is done that |format| requires data to be of type T, but it
-// is verified that |format| is a planar format.
+// E.g., for a stereo buffer the values in channel 0 will be:
+// start
+// start + increment
+// start + 2 * increment, ...
//
-// |start_time| will be used as the start time for the samples. |duration| is
-// the duration.
+// While the values in channel 1 will be:
+// start + frames * increment
+// start + (frames + 1) * increment
+// start + (frames + 2) * increment, ...
+//
+// |start_time| will be used as the start time for the samples.
template <class T>
scoped_refptr<AudioBuffer> MakeAudioBuffer(SampleFormat format,
ChannelLayout channel_layout,
- int channel_count,
+ size_t channel_count,
int sample_rate,
T start,
T increment,
- int frames,
- base::TimeDelta timestamp,
- base::TimeDelta duration);
+ size_t frames,
+ base::TimeDelta timestamp);
// Create a fake video DecoderBuffer for testing purpose. The buffer contains
// part of video decoder config info embedded so that the testing code can do
diff --git a/media/filters/audio_renderer_algorithm_unittest.cc b/media/filters/audio_renderer_algorithm_unittest.cc
index ed6b6cc..596c8cc 100644
--- a/media/filters/audio_renderer_algorithm_unittest.cc
+++ b/media/filters/audio_renderer_algorithm_unittest.cc
@@ -113,7 +113,6 @@ class AudioRendererAlgorithmTest : public testing::Test {
1,
1,
kFrameSize,
- kNoTimestamp(),
kNoTimestamp());
break;
case kSampleFormatS16:
@@ -125,7 +124,6 @@ class AudioRendererAlgorithmTest : public testing::Test {
1,
1,
kFrameSize,
- kNoTimestamp(),
kNoTimestamp());
break;
case kSampleFormatS32:
@@ -137,7 +135,6 @@ class AudioRendererAlgorithmTest : public testing::Test {
1,
1,
kFrameSize,
- kNoTimestamp(),
kNoTimestamp());
break;
default:
diff --git a/media/filters/audio_renderer_impl_unittest.cc b/media/filters/audio_renderer_impl_unittest.cc
index 1ae27e3..05fb4de 100644
--- a/media/filters/audio_renderer_impl_unittest.cc
+++ b/media/filters/audio_renderer_impl_unittest.cc
@@ -288,8 +288,7 @@ class AudioRendererImplTest : public ::testing::Test {
kPlayingAudio,
0.0f,
size,
- next_timestamp_->GetTimestamp(),
- next_timestamp_->GetFrameDuration(size));
+ next_timestamp_->GetTimestamp());
next_timestamp_->AddFrames(size);
DeliverBuffer(AudioDecoder::kOk, buffer);
diff --git a/media/filters/decrypting_audio_decoder.cc b/media/filters/decrypting_audio_decoder.cc
index 91ee63b..d00e3b9 100644
--- a/media/filters/decrypting_audio_decoder.cc
+++ b/media/filters/decrypting_audio_decoder.cc
@@ -371,8 +371,6 @@ void DecryptingAudioDecoder::EnqueueFrames(
}
frame->set_timestamp(current_time);
- frame->set_duration(
- timestamp_helper_->GetFrameDuration(frame->frame_count()));
timestamp_helper_->AddFrames(frame->frame_count());
}
}
diff --git a/media/filters/decrypting_audio_decoder_unittest.cc b/media/filters/decrypting_audio_decoder_unittest.cc
index d7f1f9d..007a288 100644
--- a/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/media/filters/decrypting_audio_decoder_unittest.cc
@@ -104,7 +104,6 @@ class DecryptingAudioDecoderTest : public testing::Test {
channels,
kSampleRate,
kFakeAudioFrameSize,
- kNoTimestamp(),
kNoTimestamp());
decoded_frame_list_.push_back(decoded_frame_);
@@ -363,14 +362,12 @@ TEST_F(DecryptingAudioDecoderTest, DecryptAndDecode_MultipleFrames) {
ChannelLayoutToChannelCount(config_.channel_layout()),
kSampleRate,
kFakeAudioFrameSize,
- kNoTimestamp(),
kNoTimestamp());
scoped_refptr<AudioBuffer> frame_b = AudioBuffer::CreateEmptyBuffer(
config_.channel_layout(),
ChannelLayoutToChannelCount(config_.channel_layout()),
kSampleRate,
kFakeAudioFrameSize,
- kNoTimestamp(),
kNoTimestamp());
decoded_frame_list_.push_back(frame_a);
decoded_frame_list_.push_back(frame_b);
diff --git a/media/filters/ffmpeg_audio_decoder_unittest.cc b/media/filters/ffmpeg_audio_decoder_unittest.cc
index 60d1f40..11df377 100644
--- a/media/filters/ffmpeg_audio_decoder_unittest.cc
+++ b/media/filters/ffmpeg_audio_decoder_unittest.cc
@@ -175,7 +175,7 @@ TEST_F(FFmpegAudioDecoderTest, ProduceAudioSamples) {
ASSERT_EQ(3u, decoded_audio_.size());
ExpectDecodedAudio(0, 0, 2902);
ExpectDecodedAudio(1, 2902, 13061);
- ExpectDecodedAudio(2, 15963, 23220);
+ ExpectDecodedAudio(2, 15963, 23219);
// Call one more time to trigger EOS.
Decode();