summaryrefslogtreecommitdiffstats
path: root/media/base
diff options
context:
space:
mode:
authorjrummell@chromium.org <jrummell@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-06-21 09:00:20 +0000
committerjrummell@chromium.org <jrummell@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-06-21 09:00:20 +0000
commit2b44aea37a19a0f114750659d88858b94e4dd8ec (patch)
tree791dc35b64d650dd183e8dd4ff9ba54f7cac1f45 /media/base
parent067433168c4d7ccf8dcfc6bacda3c50f4bcc103c (diff)
downloadchromium_src-2b44aea37a19a0f114750659d88858b94e4dd8ec.zip
chromium_src-2b44aea37a19a0f114750659d88858b94e4dd8ec.tar.gz
chromium_src-2b44aea37a19a0f114750659d88858b94e4dd8ec.tar.bz2
Add new class AudioBufferQueue.
As part of the work to simplify the handling of audio data, adding this class to create a queue of audio data. Using this class will come in a subsequent CL. BUG=248989 Review URL: https://chromiumcodereview.appspot.com/17112016 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@207761 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media/base')
-rw-r--r--media/base/audio_buffer_queue.cc158
-rw-r--r--media/base/audio_buffer_queue.h95
-rw-r--r--media/base/audio_buffer_queue_unittest.cc456
-rw-r--r--media/base/audio_buffer_unittest.cc84
-rw-r--r--media/base/test_helpers.cc99
-rw-r--r--media/base/test_helpers.h49
6 files changed, 865 insertions, 76 deletions
diff --git a/media/base/audio_buffer_queue.cc b/media/base/audio_buffer_queue.cc
new file mode 100644
index 0000000..3fa3775
--- /dev/null
+++ b/media/base/audio_buffer_queue.cc
@@ -0,0 +1,158 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_buffer_queue.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "media/base/audio_bus.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+AudioBufferQueue::AudioBufferQueue() { Clear(); }
+AudioBufferQueue::~AudioBufferQueue() {}
+
+void AudioBufferQueue::Clear() {
+ buffers_.clear();
+ current_buffer_ = buffers_.begin();
+ current_buffer_offset_ = 0;
+ frames_ = 0;
+ current_time_ = kNoTimestamp();
+}
+
+void AudioBufferQueue::Append(const scoped_refptr<AudioBuffer>& buffer_in) {
+ // If we have just written the first buffer, update |current_time_| to be the
+ // start time.
+ if (buffers_.empty()) {
+ DCHECK_EQ(frames_, 0);
+ current_time_ = buffer_in->timestamp();
+ }
+
+ // Add the buffer to the queue. Inserting into deque invalidates all
+ // iterators, so point to the first buffer.
+ buffers_.push_back(buffer_in);
+ current_buffer_ = buffers_.begin();
+
+ // Update the |frames_| counter since we have added frames.
+ frames_ += buffer_in->frame_count();
+ CHECK_GT(frames_, 0); // make sure it doesn't overflow.
+}
+
+int AudioBufferQueue::ReadFrames(int frames, AudioBus* dest) {
+ DCHECK_GE(dest->frames(), frames);
+ return InternalRead(frames, true, 0, dest);
+}
+
+int AudioBufferQueue::PeekFrames(int frames,
+ int forward_offset,
+ AudioBus* dest) {
+ DCHECK_GE(dest->frames(), frames);
+ return InternalRead(frames, false, forward_offset, dest);
+}
+
+void AudioBufferQueue::SeekFrames(int frames) {
+ // Perform seek only if we have enough frames in the queue.
+ CHECK_LE(frames, frames_);
+ int taken = InternalRead(frames, true, 0, NULL);
+ DCHECK_EQ(taken, frames);
+}
+
+int AudioBufferQueue::InternalRead(int frames,
+ bool advance_position,
+ int forward_offset,
+ AudioBus* dest) {
+ // Counts how many frames are actually read from the buffer queue.
+ int taken = 0;
+ BufferQueue::iterator current_buffer = current_buffer_;
+ int current_buffer_offset = current_buffer_offset_;
+
+ int frames_to_skip = forward_offset;
+ while (taken < frames) {
+ // |current_buffer| is valid once the first buffer has been appended to
+ // the queue. Make sure there is data to be processed.
+ if (current_buffer == buffers_.end())
+ break;
+
+ scoped_refptr<AudioBuffer> buffer = *current_buffer;
+
+ int remaining_frames_in_buffer =
+ buffer->frame_count() - current_buffer_offset;
+
+ if (frames_to_skip > 0) {
+ // If there are frames to skip, do it first. May need to skip into
+ // subsequent buffers.
+ int skipped = std::min(remaining_frames_in_buffer, frames_to_skip);
+ current_buffer_offset += skipped;
+ frames_to_skip -= skipped;
+ } else {
+ // Find the right amount to copy from the current buffer. We shall copy no
+ // more than |frames| frames in total and each single step copies no more
+ // than the current buffer size.
+ int copied = std::min(frames - taken, remaining_frames_in_buffer);
+
+ // If |dest| is NULL, there's no need to copy.
+ if (dest)
+ buffer->ReadFrames(copied, current_buffer_offset, taken, dest);
+
+ // Increase total number of frames copied, which regulates when to end
+ // this loop.
+ taken += copied;
+
+ // We have read |copied| frames from the current buffer. Advance the
+ // offset.
+ current_buffer_offset += copied;
+ }
+
+ // Has the buffer been consumed?
+ if (current_buffer_offset == buffer->frame_count()) {
+ if (advance_position) {
+ // Next buffer may not have timestamp, so we need to update current
+ // timestamp before switching to the next buffer.
+ UpdateCurrentTime(current_buffer, current_buffer_offset);
+ }
+
+ // If we are at the last buffer, no more data to be copied, so stop.
+ BufferQueue::iterator next = current_buffer + 1;
+ if (next == buffers_.end())
+ break;
+
+ // Advances the iterator.
+ current_buffer = next;
+ current_buffer_offset = 0;
+ }
+ }
+
+ if (advance_position) {
+ // Update the appropriate values since |taken| frames have been copied out.
+ frames_ -= taken;
+ DCHECK_GE(frames_, 0);
+ DCHECK(current_buffer_ != buffers_.end() || frames_ == 0);
+
+ current_buffer_ = current_buffer;
+ current_buffer_offset_ = current_buffer_offset;
+
+ UpdateCurrentTime(current_buffer_, current_buffer_offset_);
+
+ // Remove any buffers before the current buffer as there is no going
+ // backwards.
+ buffers_.erase(buffers_.begin(), current_buffer_);
+ }
+
+ return taken;
+}
+
+void AudioBufferQueue::UpdateCurrentTime(BufferQueue::iterator buffer,
+ int offset) {
+ if (buffer != buffers_.end() && (*buffer)->timestamp() != kNoTimestamp()) {
+ double time_offset = ((*buffer)->duration().InMicroseconds() * offset) /
+ static_cast<double>((*buffer)->frame_count());
+ current_time_ =
+ (*buffer)->timestamp() + base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(time_offset + 0.5));
+ }
+}
+
+} // namespace media
diff --git a/media/base/audio_buffer_queue.h b/media/base/audio_buffer_queue.h
new file mode 100644
index 0000000..5ae9e21
--- /dev/null
+++ b/media/base/audio_buffer_queue.h
@@ -0,0 +1,95 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_BUFFER_QUEUE_H_
+#define MEDIA_BASE_AUDIO_BUFFER_QUEUE_H_
+
+#include <deque>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioBus;
+
+// A queue of AudioBuffers to support reading of arbitrary chunks of a media
+// data source. Audio data can be copied into an AudioBus for output. The
+// current position can be forwarded to anywhere in the buffered data.
+//
+// This class is not inherently thread-safe. Concurrent access must be
+// externally serialized.
+class MEDIA_EXPORT AudioBufferQueue {
+ public:
+ AudioBufferQueue();
+ ~AudioBufferQueue();
+
+ // Clears the buffer queue.
+ void Clear();
+
+ // Appends |buffer_in| to this queue.
+ void Append(const scoped_refptr<AudioBuffer>& buffer_in);
+
+ // Reads a maximum of |frames| frames into |dest| from the current position.
+ // Returns the number of frames read. The current position will advance by the
+ // amount of frames read.
+ int ReadFrames(int frames, AudioBus* dest);
+
+ // Copies up to |frames| frames from current position to |dest|. Returns
+ // number of frames copied. Doesn't advance current position. Starts at
+ // |forward_offset| from current position.
+ int PeekFrames(int frames, int forward_offset, AudioBus* dest);
+
+ // Moves the current position forward by |frames| frames. If |frames| exceeds
+ // frames available, the seek operation will fail.
+ void SeekFrames(int frames);
+
+ // Returns the number of frames buffered beyond the current position.
+ int frames() const { return frames_; }
+
+ // Returns the current timestamp, taking into account the current offset. The
+ // value is calculated based on the timestamp of the current buffer. If the
+ // for the current buffer is set to 0, then returns value that corresponds to
+ // the last position in a buffer that had timestamp set. kNoTimestamp() is
+ // returned if no buffers we read from had timestamp set.
+ base::TimeDelta current_time() const { return current_time_; }
+
+ private:
+ // Definition of the buffer queue.
+ typedef std::deque<scoped_refptr<AudioBuffer> > BufferQueue;
+
+ // An internal method shared by ReadFrames() and SeekFrames() that actually
+ // does reading. It reads a maximum of |frames| frames into |dest|. Returns
+ // the number of frames read. The current position will be moved forward by
+ // the number of frames read if |advance_position| is set. If |dest| is NULL,
+ // only the current position will advance but no data will be copied.
+ // |forward_offset| can be used to skip frames before reading.
+ int InternalRead(int frames,
+ bool advance_position,
+ int forward_offset,
+ AudioBus* dest);
+
+ // Updates |current_time_| with the time that corresponds to the specified
+ // position in the buffer.
+ void UpdateCurrentTime(BufferQueue::iterator buffer, int offset);
+
+ BufferQueue::iterator current_buffer_;
+ BufferQueue buffers_;
+ int current_buffer_offset_;
+
+ // Number of frames available to be read in the buffer.
+ int frames_;
+
+ // Keeps track of the most recent time we've seen in case the |buffers_| is
+ // empty when our owner asks what time it is.
+ base::TimeDelta current_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioBufferQueue);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_BUFFER_QUEUE_H_
diff --git a/media/base/audio_buffer_queue_unittest.cc b/media/base/audio_buffer_queue_unittest.cc
new file mode 100644
index 0000000..c898c28
--- /dev/null
+++ b/media/base/audio_buffer_queue_unittest.cc
@@ -0,0 +1,456 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/stringprintf.h"
+#include "base/time.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_buffer_queue.h"
+#include "media/base/audio_bus.h"
+#include "media/base/buffers.h"
+#include "media/base/test_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static void VerifyResult(float* channel_data,
+ int frames,
+ float start,
+ float increment) {
+ for (int i = 0; i < frames; ++i) {
+ SCOPED_TRACE(base::StringPrintf(
+ "i=%d/%d start=%f, increment=%f", i, frames, start, increment));
+ ASSERT_EQ(start, channel_data[i]);
+ start += increment;
+ }
+}
+
+TEST(AudioBufferQueueTest, AppendAndClear) {
+ const int channels = 1;
+ const int frames = 8;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+ EXPECT_EQ(0, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, frames, start_time));
+ EXPECT_EQ(frames, buffer.frames());
+ buffer.Clear();
+ EXPECT_EQ(0, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 20, 1, frames, start_time));
+ EXPECT_EQ(frames, buffer.frames());
+}
+
+TEST(AudioBufferQueueTest, MultipleAppend) {
+ const int channels = 1;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+
+ // Append 40 frames in 5 buffers.
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, 8, start_time));
+ EXPECT_EQ(8, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, 8, start_time));
+ EXPECT_EQ(16, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, 8, start_time));
+ EXPECT_EQ(24, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, 8, start_time));
+ EXPECT_EQ(32, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 10, 1, 8, start_time));
+ EXPECT_EQ(40, buffer.frames());
+}
+
+TEST(AudioBufferQueueTest, IteratorCheck) {
+ const int channels = 1;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+
+ // Append 40 frames in 5 buffers. Intersperse ReadFrames() to make sure the
+ // iterator is pointing to the correct position.
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 10.0f, 1.0f, 8, start_time));
+ EXPECT_EQ(8, buffer.frames());
+
+ EXPECT_EQ(4, buffer.ReadFrames(4, bus.get()));
+ EXPECT_EQ(4, buffer.frames());
+ VerifyResult(bus->channel(0), 4, 10.0f, 1.0f);
+
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 20.0f, 1.0f, 8, start_time));
+ EXPECT_EQ(12, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 30.0f, 1.0f, 8, start_time));
+ EXPECT_EQ(20, buffer.frames());
+
+ buffer.SeekFrames(16);
+ EXPECT_EQ(4, buffer.ReadFrames(4, bus.get()));
+ EXPECT_EQ(0, buffer.frames());
+ VerifyResult(bus->channel(0), 4, 34.0f, 1.0f);
+
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 40.0f, 1.0f, 8, start_time));
+ EXPECT_EQ(8, buffer.frames());
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 50.0f, 1.0f, 8, start_time));
+ EXPECT_EQ(16, buffer.frames());
+
+ EXPECT_EQ(4, buffer.ReadFrames(4, bus.get()));
+ VerifyResult(bus->channel(0), 4, 40.0f, 1.0f);
+
+ // Read off the end of the buffer.
+ EXPECT_EQ(12, buffer.frames());
+ buffer.SeekFrames(8);
+ EXPECT_EQ(4, buffer.ReadFrames(100, bus.get()));
+ VerifyResult(bus->channel(0), 4, 54.0f, 1.0f);
+}
+
+TEST(AudioBufferQueueTest, Seek) {
+ const int channels = 2;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+
+ // Add 6 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 1.0f, 1.0f, 6, start_time));
+ EXPECT_EQ(6, buffer.frames());
+
+ // Seek past 2 frames.
+ buffer.SeekFrames(2);
+ EXPECT_EQ(4, buffer.frames());
+
+ // Seek to end of data.
+ buffer.SeekFrames(4);
+ EXPECT_EQ(0, buffer.frames());
+
+ // At the end of the buffer, seeking fails unless 0 frames are specified.
+ buffer.SeekFrames(0);
+}
+
+TEST(AudioBufferQueueTest, ReadF32) {
+ const int channels = 2;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+
+ // Add 76 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 1.0f, 1.0f, 6, start_time));
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 13.0f, 1.0f, 10, start_time));
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 33.0f, 1.0f, 60, start_time));
+ EXPECT_EQ(76, buffer.frames());
+
+ // Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be
+ // 1, 3, 5, and ch[1] should be 2, 4, 6.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(3, buffer.ReadFrames(3, bus.get()));
+ EXPECT_EQ(73, buffer.frames());
+ VerifyResult(bus->channel(0), 3, 1.0f, 2.0f);
+ VerifyResult(bus->channel(1), 3, 2.0f, 2.0f);
+
+ // Now read 5 frames, which will span buffers.
+ EXPECT_EQ(5, buffer.ReadFrames(5, bus.get()));
+ EXPECT_EQ(68, buffer.frames());
+ VerifyResult(bus->channel(0), 5, 7.0f, 2.0f);
+ VerifyResult(bus->channel(1), 5, 8.0f, 2.0f);
+
+ // Now skip into the third buffer.
+ buffer.SeekFrames(20);
+ EXPECT_EQ(48, buffer.frames());
+
+ // Now read 2 frames, which are in the third buffer.
+ EXPECT_EQ(2, buffer.ReadFrames(2, bus.get()));
+ VerifyResult(bus->channel(0), 2, 57.0f, 2.0f);
+ VerifyResult(bus->channel(1), 2, 58.0f, 2.0f);
+}
+
+TEST(AudioBufferQueueTest, ReadU8) {
+ const int channels = 4;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+
+ // Add 4 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<uint8>(
+ kSampleFormatU8, channels, 128, 1, 4, start_time));
+
+ // Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
+ // 128, 132, 136, 140, other channels similar. However, values are converted
+ // from [0, 255] to [-1.0, 1.0] with a bias of 128. Thus the first buffer
+ // value should be 0.0, then 1/127, 2/127, etc.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(4, buffer.ReadFrames(4, bus.get()));
+ EXPECT_EQ(0, buffer.frames());
+ VerifyResult(bus->channel(0), 4, 0.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(1), 4, 1.0f / 127.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(2), 4, 2.0f / 127.0f, 4.0f / 127.0f);
+ VerifyResult(bus->channel(3), 4, 3.0f / 127.0f, 4.0f / 127.0f);
+}
+
+TEST(AudioBufferQueueTest, ReadS16) {
+ const int channels = 2;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+
+ // Add 24 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, 4, start_time));
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 9, 1, 20, start_time));
+ EXPECT_EQ(24, buffer.frames());
+
+ // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be
+ // 1, 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12.
+ // Data is converted to float from -1.0 to 1.0 based on int16 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(6, buffer.ReadFrames(6, bus.get()));
+ EXPECT_EQ(18, buffer.frames());
+ VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 2.0f / kint16max);
+ VerifyResult(bus->channel(1), 6, 2.0f / kint16max, 2.0f / kint16max);
+}
+
+TEST(AudioBufferQueueTest, ReadS32) {
+ const int channels = 2;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+
+ // Add 24 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<int32>(
+ kSampleFormatS32, channels, 1, 1, 4, start_time));
+ buffer.Append(MakeInterleavedAudioBuffer<int32>(
+ kSampleFormatS32, channels, 9, 1, 20, start_time));
+ EXPECT_EQ(24, buffer.frames());
+
+ // Read 6 frames from the buffer. Data is interleaved, so ch[0] should be
+ // 1, 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12.
+ // Data is converted to float from -1.0 to 1.0 based on int32 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(6, buffer.ReadFrames(6, bus.get()));
+ EXPECT_EQ(18, buffer.frames());
+ VerifyResult(bus->channel(0), 6, 1.0f / kint32max, 2.0f / kint32max);
+ VerifyResult(bus->channel(1), 6, 2.0f / kint32max, 2.0f / kint32max);
+
+ // Read the next 2 frames.
+ EXPECT_EQ(2, buffer.ReadFrames(2, bus.get()));
+ EXPECT_EQ(16, buffer.frames());
+ VerifyResult(bus->channel(0), 2, 13.0f / kint32max, 2.0f / kint32max);
+ VerifyResult(bus->channel(1), 2, 14.0f / kint32max, 2.0f / kint32max);
+}
+
+TEST(AudioBufferQueueTest, ReadF32Planar) {
+ const int channels = 2;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+
+ // Add 14 frames of data.
+ buffer.Append(MakePlanarAudioBuffer<float>(
+ kSampleFormatPlanarF32, channels, 1.0f, 1.0f, 4, start_time));
+ buffer.Append(MakePlanarAudioBuffer<float>(
+ kSampleFormatPlanarF32, channels, 50.0f, 1.0f, 10, start_time));
+ EXPECT_EQ(14, buffer.frames());
+
+ // Read 6 frames from the buffer. F32 is planar, so ch[0] should be
+ // 1, 2, 3, 4, 50, 51, and ch[1] should be 5, 6, 7, 8, 60, 61.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(6, buffer.ReadFrames(6, bus.get()));
+ EXPECT_EQ(8, buffer.frames());
+ VerifyResult(bus->channel(0), 4, 1.0f, 1.0f);
+ VerifyResult(bus->channel(0) + 4, 2, 50.0f, 1.0f);
+ VerifyResult(bus->channel(1), 4, 5.0f, 1.0f);
+ VerifyResult(bus->channel(1) + 4, 2, 60.0f, 1.0f);
+}
+
+TEST(AudioBufferQueueTest, ReadS16Planar) {
+ const int channels = 2;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+
+ // Add 24 frames of data.
+ buffer.Append(MakePlanarAudioBuffer<int16>(
+ kSampleFormatPlanarS16, channels, 1, 1, 4, start_time));
+ buffer.Append(MakePlanarAudioBuffer<int16>(
+ kSampleFormatPlanarS16, channels, 100, 5, 20, start_time));
+ EXPECT_EQ(24, buffer.frames());
+
+ // Read 6 frames from the buffer. Data is planar, so ch[0] should be
+ // 1, 2, 3, 4, 100, 105, and ch[1] should be 5, 6, 7, 8, 200, 205.
+ // Data is converted to float from -1.0 to 1.0 based on int16 range.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(6, buffer.ReadFrames(6, bus.get()));
+ EXPECT_EQ(18, buffer.frames());
+ VerifyResult(bus->channel(0), 4, 1.0f / kint16max, 1.0f / kint16max);
+ VerifyResult(bus->channel(0) + 4, 2, 100.0f / kint16max, 5.0f / kint16max);
+ VerifyResult(bus->channel(1), 4, 5.0f / kint16max, 1.0f / kint16max);
+ VerifyResult(bus->channel(1) + 4, 2, 200.0f / kint16max, 5.0f / kint16max);
+}
+
+TEST(AudioBufferQueueTest, ReadManyChannels) {
+ const int channels = 16;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+
+ // Add 76 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 0.0f, 1.0f, 6, start_time));
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 6.0f * channels, 1.0f, 10, start_time));
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 16.0f * channels, 1.0f, 60, start_time));
+ EXPECT_EQ(76, buffer.frames());
+
+ // Read 30 frames from the buffer. F32 is interleaved, so ch[0] should be
+ // 0, 16, 32, ..., and ch[1] should be 1, 17, 33, .... Check each channel.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ EXPECT_EQ(30, buffer.ReadFrames(30, bus.get()));
+ EXPECT_EQ(46, buffer.frames());
+ for (int i = 0; i < channels; ++i) {
+ VerifyResult(bus->channel(i), 30, static_cast<float>(i), 16.0f);
+ }
+}
+
+TEST(AudioBufferQueueTest, Peek) {
+ const int channels = 4;
+ const base::TimeDelta start_time;
+ AudioBufferQueue buffer;
+
+ // Add 60 frames of data.
+ buffer.Append(MakeInterleavedAudioBuffer<float>(
+ kSampleFormatF32, channels, 0.0f, 1.0f, 60, start_time));
+ EXPECT_EQ(60, buffer.frames());
+
+ // Peek at the first 30 frames.
+ scoped_ptr<AudioBus> bus1 = AudioBus::Create(channels, 100);
+ EXPECT_EQ(60, buffer.frames());
+ EXPECT_EQ(60, buffer.PeekFrames(100, 0, bus1.get())); // only 60 in buffer.
+ EXPECT_EQ(30, buffer.PeekFrames(30, 0, bus1.get())); // should get first 30.
+ EXPECT_EQ(60, buffer.frames());
+
+ // Now read the next 30 frames (which should be the same as those peeked at).
+ scoped_ptr<AudioBus> bus2 = AudioBus::Create(channels, 100);
+ EXPECT_EQ(30, buffer.ReadFrames(30, bus2.get()));
+ for (int i = 0; i < channels; ++i) {
+ VerifyResult(bus1->channel(i),
+ 30,
+ static_cast<float>(i),
+ static_cast<float>(channels));
+ VerifyResult(bus2->channel(i),
+ 30,
+ static_cast<float>(i),
+ static_cast<float>(channels));
+ }
+
+ // Peek 10 frames forward
+ EXPECT_EQ(5, buffer.PeekFrames(5, 10, bus1.get()));
+ for (int i = 0; i < channels; ++i) {
+ VerifyResult(bus1->channel(i),
+ 5,
+ static_cast<float>(i + 40 * channels),
+ static_cast<float>(channels));
+ }
+
+ // Peek to the end of the buffer.
+ EXPECT_EQ(30, buffer.frames());
+ EXPECT_EQ(30, buffer.PeekFrames(100, 0, bus1.get()));
+ EXPECT_EQ(30, buffer.PeekFrames(30, 0, bus1.get()));
+}
+
+TEST(AudioBufferQueueTest, Time) {
+ const int channels = 2;
+ const base::TimeDelta start_time1;
+ const base::TimeDelta start_time2 = base::TimeDelta::FromSeconds(30);
+ AudioBufferQueue buffer;
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+
+ // Add two buffers (second one added later):
+ // first: start=0s, duration=10s
+ // second: start=30s, duration=10s
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, 10, start_time1));
+ EXPECT_EQ(10, buffer.frames());
+
+ // Check starting time.
+ EXPECT_EQ(start_time1, buffer.current_time());
+
+ // Read 2 frames, should be 2s in (since duration is 1s per sample).
+ EXPECT_EQ(2, buffer.ReadFrames(2, bus.get()));
+ EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(2),
+ buffer.current_time());
+
+ // Skip 2 frames.
+ buffer.SeekFrames(2);
+ EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(4),
+ buffer.current_time());
+
+ // Add second buffer for more data.
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, 10, start_time2));
+ EXPECT_EQ(16, buffer.frames());
+
+ // Read until almost the end of buffer1.
+ EXPECT_EQ(5, buffer.ReadFrames(5, bus.get()));
+ EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(9),
+ buffer.current_time());
+
+ // Read 1 value, so time moved to buffer2.
+ EXPECT_EQ(1, buffer.ReadFrames(1, bus.get()));
+ EXPECT_EQ(start_time2, buffer.current_time());
+
+ // Read all 10 frames in buffer2, timestamp should be last time from buffer2.
+ EXPECT_EQ(10, buffer.ReadFrames(10, bus.get()));
+ EXPECT_EQ(start_time2 + base::TimeDelta::FromSeconds(10),
+ buffer.current_time());
+
+ // Try to read more frames (which don't exist), timestamp should remain.
+ EXPECT_EQ(0, buffer.ReadFrames(5, bus.get()));
+ EXPECT_EQ(start_time2 + base::TimeDelta::FromSeconds(10),
+ buffer.current_time());
+}
+
+TEST(AudioBufferQueueTest, NoTime) {
+ const int channels = 2;
+ AudioBufferQueue buffer;
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+
+ // Add two buffers with no timestamps. Time should always be unknown.
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, 10, kNoTimestamp()));
+ buffer.Append(MakeInterleavedAudioBuffer<int16>(
+ kSampleFormatS16, channels, 1, 1, 10, kNoTimestamp()));
+ EXPECT_EQ(20, buffer.frames());
+
+ // Check starting time.
+ EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+
+ // Read 2 frames.
+ EXPECT_EQ(2, buffer.ReadFrames(2, bus.get()));
+ EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+
+ // Skip 2 frames.
+ buffer.SeekFrames(2);
+ EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+
+ // Read until almost the end of buffer1.
+ EXPECT_EQ(5, buffer.ReadFrames(5, bus.get()));
+ EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+
+ // Read 1 value, so time moved to buffer2.
+ EXPECT_EQ(1, buffer.ReadFrames(1, bus.get()));
+ EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+
+ // Read all 10 frames in buffer2.
+ EXPECT_EQ(10, buffer.ReadFrames(10, bus.get()));
+ EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+
+ // Try to read more frames (which don't exist), timestamp should remain.
+ EXPECT_EQ(0, buffer.ReadFrames(5, bus.get()));
+ EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+}
+
+} // namespace media
diff --git a/media/base/audio_buffer_unittest.cc b/media/base/audio_buffer_unittest.cc
index 286953e..1c01354 100644
--- a/media/base/audio_buffer_unittest.cc
+++ b/media/base/audio_buffer_unittest.cc
@@ -6,79 +6,11 @@
#include "base/strings/stringprintf.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
+#include "media/base/test_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
-template <class T>
-static scoped_refptr<AudioBuffer> MakeInterleavedBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- const base::TimeDelta start_time) {
- DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 ||
- format == kSampleFormatS32 || format == kSampleFormatF32);
-
- // Create a block of memory with values:
- // start
- // start + increment
- // start + 2 * increment, ...
- // Since this is interleaved data, channel 0 data will be:
- // start
- // start + channels * increment
- // start + 2 * channels * increment, ...
- int buffer_size = frames * channels * sizeof(T);
- scoped_ptr<uint8[]> memory(new uint8[buffer_size]);
- uint8* data[] = { memory.get() };
- T* buffer = reinterpret_cast<T*>(memory.get());
- for (int i = 0; i < frames * channels; ++i) {
- buffer[i] = start;
- start += increment;
- }
- // Duration is 1 second per frame (for simplicity).
- base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- return AudioBuffer::CopyFrom(
- format, channels, frames, data, start_time, duration);
-}
-
-template <class T>
-static scoped_refptr<AudioBuffer> MakePlanarBuffer(
- SampleFormat format,
- int channels,
- T start,
- T increment,
- int frames,
- const base::TimeDelta start_time) {
- DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16);
-
- // Create multiple blocks of data, once for each channel.
- // Values in channel 0 will be:
- // start
- // start + increment
- // start + 2 * increment, ...
- // Values in channel 1 will be:
- // start + frames * increment
- // start + (frames + 1) * increment
- // start + (frames + 2) * increment, ...
- int buffer_size = frames * sizeof(T);
- scoped_ptr<uint8*[]> data(new uint8*[channels]);
- scoped_ptr<uint8[]> memory(new uint8[channels * buffer_size]);
- for (int i = 0; i < channels; ++i) {
- data.get()[i] = memory.get() + i * buffer_size;
- T* buffer = reinterpret_cast<T*>(data.get()[i]);
- for (int j = 0; j < frames; ++j) {
- buffer[j] = start;
- start += increment;
- }
- }
- // Duration is 1 second per frame (for simplicity).
- base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
- return AudioBuffer::CopyFrom(
- format, channels, frames, data.get(), start_time, duration);
-}
-
static void VerifyResult(float* channel_data,
int frames,
float start,
@@ -95,7 +27,7 @@ TEST(AudioBufferTest, CopyFrom) {
const int channels = 1;
const int frames = 8;
const base::TimeDelta start_time;
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<uint8>(
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
kSampleFormatU8, channels, 1, 1, frames, start_time);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(buffer->timestamp(), start_time);
@@ -129,7 +61,7 @@ TEST(AudioBufferTest, ReadU8) {
const int channels = 4;
const int frames = 4;
const base::TimeDelta start_time;
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<uint8>(
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
kSampleFormatU8, channels, 128, 1, frames, start_time);
// Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
@@ -148,7 +80,7 @@ TEST(AudioBufferTest, ReadS16) {
const int channels = 2;
const int frames = 10;
const base::TimeDelta start_time;
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<int16>(
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int16>(
kSampleFormatS16, channels, 1, 1, frames, start_time);
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
@@ -172,7 +104,7 @@ TEST(AudioBufferTest, ReadS32) {
const int channels = 2;
const int frames = 6;
const base::TimeDelta start_time;
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<int32>(
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int32>(
kSampleFormatS32, channels, 1, 1, frames, start_time);
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
@@ -194,7 +126,7 @@ TEST(AudioBufferTest, ReadF32) {
const int channels = 2;
const int frames = 20;
const base::TimeDelta start_time;
- scoped_refptr<AudioBuffer> buffer = MakeInterleavedBuffer<float>(
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
kSampleFormatF32, channels, 1.0f, 1.0f, frames, start_time);
// Read first 10 frames from the buffer. F32 is interleaved, so ch[0] should
@@ -215,7 +147,7 @@ TEST(AudioBufferTest, ReadS16Planar) {
const int channels = 2;
const int frames = 20;
const base::TimeDelta start_time;
- scoped_refptr<AudioBuffer> buffer = MakePlanarBuffer<int16>(
+ scoped_refptr<AudioBuffer> buffer = MakePlanarAudioBuffer<int16>(
kSampleFormatPlanarS16, channels, 1, 1, frames, start_time);
// Read 6 frames from the buffer. Data is planar, so ch[0] should be 1, 2, 3,
@@ -247,7 +179,7 @@ TEST(AudioBufferTest, ReadF32Planar) {
const int channels = 4;
const int frames = 100;
const base::TimeDelta start_time;
- scoped_refptr<AudioBuffer> buffer = MakePlanarBuffer<float>(
+ scoped_refptr<AudioBuffer> buffer = MakePlanarAudioBuffer<float>(
kSampleFormatPlanarF32, channels, 1.0f, 1.0f, frames, start_time);
// Read all 100 frames from the buffer. F32 is planar, so ch[0] should be 1,
diff --git a/media/base/test_helpers.cc b/media/base/test_helpers.cc
index 2c71865..368774b 100644
--- a/media/base/test_helpers.cc
+++ b/media/base/test_helpers.cc
@@ -5,9 +5,12 @@
#include "media/base/test_helpers.h"
#include "base/bind.h"
+#include "base/logging.h"
#include "base/message_loop.h"
#include "base/test/test_timeouts.h"
+#include "base/time.h"
#include "base/timer.h"
+#include "media/base/audio_buffer.h"
#include "media/base/bind_to_loop.h"
#include "ui/gfx/rect.h"
@@ -143,4 +146,100 @@ gfx::Size TestVideoConfig::LargeCodedSize() {
return kLargeSize;
}
+template <class T>
+scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
+ SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta start_time) {
+ DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 ||
+ format == kSampleFormatS32 || format == kSampleFormatF32);
+
+ // Create a block of memory with values:
+ // start
+ // start + increment
+ // start + 2 * increment, ...
+ // Since this is interleaved data, channel 0 data will be:
+ // start
+ // start + channels * increment
+ // start + 2 * channels * increment, ...
+ int buffer_size = frames * channels * sizeof(T);
+ scoped_ptr<uint8[]> memory(new uint8[buffer_size]);
+ uint8* data[] = { memory.get() };
+ T* buffer = reinterpret_cast<T*>(memory.get());
+ for (int i = 0; i < frames * channels; ++i) {
+ buffer[i] = start;
+ start += increment;
+ }
+ // Duration is 1 second per frame (for simplicity).
+ base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ return AudioBuffer::CopyFrom(
+ format, channels, frames, data, start_time, duration);
+}
+
+template <class T>
+scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
+ SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta start_time) {
+ DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16);
+
+ // Create multiple blocks of data, one for each channel.
+ // Values in channel 0 will be:
+ // start
+ // start + increment
+ // start + 2 * increment, ...
+ // Values in channel 1 will be:
+ // start + frames * increment
+ // start + (frames + 1) * increment
+ // start + (frames + 2) * increment, ...
+ int buffer_size = frames * sizeof(T);
+ scoped_ptr<uint8*[]> data(new uint8*[channels]);
+ scoped_ptr<uint8[]> memory(new uint8[channels * buffer_size]);
+ for (int i = 0; i < channels; ++i) {
+ data.get()[i] = memory.get() + i * buffer_size;
+ T* buffer = reinterpret_cast<T*>(data.get()[i]);
+ for (int j = 0; j < frames; ++j) {
+ buffer[j] = start;
+ start += increment;
+ }
+ }
+ // Duration is 1 second per frame (for simplicity).
+ base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ return AudioBuffer::CopyFrom(
+ format, channels, frames, data.get(), start_time, duration);
+}
+
+// Instantiate all the types of MakeInterleavedAudioBuffer() and
+// MakePlanarAudioBuffer() needed.
+
+#define DEFINE_INTERLEAVED_INSTANCE(type) \
+ template scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer<type>( \
+ SampleFormat format, \
+ int channels, \
+ type start, \
+ type increment, \
+ int frames, \
+ base::TimeDelta start_time)
+DEFINE_INTERLEAVED_INSTANCE(uint8);
+DEFINE_INTERLEAVED_INSTANCE(int16);
+DEFINE_INTERLEAVED_INSTANCE(int32);
+DEFINE_INTERLEAVED_INSTANCE(float);
+
+#define DEFINE_PLANAR_INSTANCE(type) \
+ template scoped_refptr<AudioBuffer> MakePlanarAudioBuffer<type>( \
+ SampleFormat format, \
+ int channels, \
+ type start, \
+ type increment, \
+ int frames, \
+ base::TimeDelta start_time)
+DEFINE_PLANAR_INSTANCE(int16);
+DEFINE_PLANAR_INSTANCE(float);
+
} // namespace media
diff --git a/media/base/test_helpers.h b/media/base/test_helpers.h
index bc505d4..7eebfd9 100644
--- a/media/base/test_helpers.h
+++ b/media/base/test_helpers.h
@@ -8,16 +8,20 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "media/base/pipeline_status.h"
+#include "media/base/sample_format.h"
#include "media/base/video_decoder_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "ui/gfx/size.h"
namespace base {
class MessageLoop;
+class TimeDelta;
}
namespace media {
+class AudioBuffer;
+
// Return a callback that expects to be run once.
base::Closure NewExpectedClosure();
PipelineStatusCB NewExpectedStatusCB(PipelineStatus status);
@@ -79,6 +83,51 @@ class TestVideoConfig {
DISALLOW_IMPLICIT_CONSTRUCTORS(TestVideoConfig);
};
+// Create an AudioBuffer containing |frames| frames of data, where each sample
+// is of type T. Each frame will have the data from |channels| channels
+// interleaved. |start| and |increment| are used to specify the values for the
+// samples. Since this is interleaved data, channel 0 data will be:
+// |start|
+// |start| + |channels| * |increment|
+// |start| + 2 * |channels| * |increment|, and so on.
+// Data for subsequent channels is similar. No check is done that |format|
+// requires data to be of type T, but it is verified that |format| is an
+// interleaved format.
+//
+// |start_time| will be used as the start time for the samples. Duration is set
+// to 1 second per frame, to simplify calculations.
+template <class T>
+scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
+ SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta start_time);
+
+// Create an AudioBuffer containing |frames| frames of data, where each sample
+// is of type T. Since this is planar data, there will be a block for each of
+// |channels| channels. |start| and |increment| are used to specify the values
+// for the samples, which are created in channel order. Since this is planar
+// data, channel 0 data will be:
+// |start|
+// |start| + |increment|
+// |start| + 2 * |increment|, and so on.
+// Data for channel 1 will follow where channel 0 ends. Subsequent channels are
+// similar. No check is done that |format| requires data to be of type T, but it
+// is verified that |format| is a planar format.
+//
+// |start_time| will be used as the start time for the samples. Duration is set
+// to 1 second per frame, to simplify calculations.
+template <class T>
+scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
+ SampleFormat format,
+ int channels,
+ T start,
+ T increment,
+ int frames,
+ base::TimeDelta start_time);
+
} // namespace media
#endif // MEDIA_BASE_TEST_HELPERS_H_