author    jrummell@chromium.org <jrummell@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-07-10 03:56:10 +0000
committer jrummell@chromium.org <jrummell@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-07-10 03:56:10 +0000
commit    47b37a6c0e164891bfa3993687cd3cc3edd113c6 (patch)
tree      465e76cafc34c43f1a8d027f9fea76384b82d8c3 /media
parent    a6d34f348669fba08f87ec367efd9ba8bc53f8cb (diff)
Switch audio code from DataBuffer to AudioBuffer.
These are the changes to have the audio code use the new AudioBuffer and
AudioBufferQueue classes. The goal is to reduce the number of times the
audio data is copied and transformed.

BUG=248989

Review URL: https://chromiumcodereview.appspot.com/17737004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@210730 0039d316-1c4b-4281-b951-d872f2087c98
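For orientation, here is a minimal sketch of the byte-to-frame switch at a call site. The class and method names come from the diff below; the before/after pairing itself is illustrative, not code from this CL:

  // Before: timestamps were tracked in bytes, so the helper needed the
  // frame size in bytes up front.
  //   AudioTimestampHelper helper(bytes_per_frame, samples_per_second);
  //   helper.AddBytes(buffer->data_size());
  //
  // After: AudioBuffer counts frames and knows its own sample format,
  // so the helper only needs the sample rate.
  AudioTimestampHelper helper(samples_per_second);
  helper.AddFrames(buffer->frame_count());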
Diffstat (limited to 'media')
-rw-r--r--media/base/android/media_source_player.cc6
-rw-r--r--media/base/audio_buffer.cc42
-rw-r--r--media/base/audio_buffer.h36
-rw-r--r--media/base/audio_buffer_queue.cc39
-rw-r--r--media/base/audio_buffer_queue.h21
-rw-r--r--media/base/audio_buffer_queue_unittest.cc183
-rw-r--r--media/base/audio_buffer_unittest.cc88
-rw-r--r--media/base/audio_bus.cc17
-rw-r--r--media/base/audio_bus.h9
-rw-r--r--media/base/audio_decoder.h5
-rw-r--r--media/base/audio_splicer.cc55
-rw-r--r--media/base/audio_splicer.h14
-rw-r--r--media/base/audio_splicer_unittest.cc188
-rw-r--r--media/base/audio_timestamp_helper.cc27
-rw-r--r--media/base/audio_timestamp_helper.h47
-rw-r--r--media/base/audio_timestamp_helper_unittest.cc88
-rw-r--r--media/base/decryptor.h4
-rw-r--r--media/base/test_helpers.cc16
-rw-r--r--media/base/test_helpers.h14
-rw-r--r--media/filters/audio_renderer_algorithm.cc369
-rw-r--r--media/filters/audio_renderer_algorithm.h104
-rw-r--r--media/filters/audio_renderer_algorithm_unittest.cc160
-rw-r--r--media/filters/audio_renderer_impl.cc35
-rw-r--r--media/filters/audio_renderer_impl.h13
-rw-r--r--media/filters/audio_renderer_impl_unittest.cc143
-rw-r--r--media/filters/decrypting_audio_decoder.cc20
-rw-r--r--media/filters/decrypting_audio_decoder_unittest.cc43
-rw-r--r--media/filters/ffmpeg_audio_decoder.cc92
-rw-r--r--media/filters/ffmpeg_audio_decoder.h8
-rw-r--r--media/filters/ffmpeg_audio_decoder_unittest.cc6
-rw-r--r--media/filters/opus_audio_decoder.cc41
-rw-r--r--media/filters/opus_audio_decoder.h4
32 files changed, 1016 insertions(+), 921 deletions(-)
diff --git a/media/base/android/media_source_player.cc b/media/base/android/media_source_player.cc
index c3380281..22bbf7b 100644
--- a/media/base/android/media_source_player.cc
+++ b/media/base/android/media_source_player.cc
@@ -459,8 +459,7 @@ void MediaSourcePlayer::DemuxerReady(
audio_extra_data_ = params.audio_extra_data;
if (HasAudio()) {
DCHECK_GT(num_channels_, 0);
- audio_timestamp_helper_.reset(new AudioTimestampHelper(
- kBytesPerAudioOutputSample * num_channels_, sampling_rate_));
+ audio_timestamp_helper_.reset(new AudioTimestampHelper(sampling_rate_));
audio_timestamp_helper_->SetBaseTimestamp(GetCurrentTime());
} else {
audio_timestamp_helper_.reset();
@@ -560,7 +559,8 @@ void MediaSourcePlayer::OnSeekRequestAck(unsigned seek_request_id) {
void MediaSourcePlayer::UpdateTimestamps(
const base::TimeDelta& presentation_timestamp, size_t audio_output_bytes) {
if (audio_output_bytes > 0) {
- audio_timestamp_helper_->AddBytes(audio_output_bytes);
+ audio_timestamp_helper_->AddFrames(
+ audio_output_bytes / (kBytesPerAudioOutputSample * num_channels_));
clock_.SetMaxTime(audio_timestamp_helper_->GetTimestamp());
} else {
clock_.SetMaxTime(presentation_timestamp);
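The conversion at this boundary is plain arithmetic. A worked example, assuming 16-bit output samples (kBytesPerAudioOutputSample == 2) and stereo:

  // 2 bytes/sample * 2 channels = 4 bytes per frame, so 4096 output
  // bytes advance the helper by 4096 / 4 == 1024 frames.
  int frames = audio_output_bytes / (kBytesPerAudioOutputSample * num_channels_);
  audio_timestamp_helper_->AddFrames(frames);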
diff --git a/media/base/audio_buffer.cc b/media/base/audio_buffer.cc
index a612a57..61296da 100644
--- a/media/base/audio_buffer.cc
+++ b/media/base/audio_buffer.cc
@@ -24,6 +24,9 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
: sample_format_(sample_format),
channel_count_(channel_count),
frame_count_(frame_count),
+ adjusted_frame_count_(frame_count),
+ trim_start_(0),
+ end_of_stream_(data == NULL && frame_count_ == 0),
timestamp_(timestamp),
duration_(duration) {
CHECK_GE(channel_count, 0);
@@ -34,10 +37,8 @@ AudioBuffer::AudioBuffer(SampleFormat sample_format,
int data_size = frame_count * bytes_per_channel;
// Empty buffer?
- if (!data) {
- CHECK_EQ(frame_count, 0);
+ if (!data)
return;
- }
if (sample_format == kSampleFormatPlanarF32 ||
sample_format == kSampleFormatPlanarS16) {
@@ -90,6 +91,18 @@ scoped_refptr<AudioBuffer> AudioBuffer::CopyFrom(
}
// static
+scoped_refptr<AudioBuffer> AudioBuffer::CreateEmptyBuffer(
+ int channel_count,
+ int frame_count,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration) {
+ CHECK_GT(frame_count, 0); // Otherwise looks like an EOF buffer.
+ // Since data == NULL, format doesn't matter.
+ return make_scoped_refptr(new AudioBuffer(
+ kSampleFormatF32, channel_count, frame_count, NULL, timestamp, duration));
+}
+
+// static
scoped_refptr<AudioBuffer> AudioBuffer::CreateEOSBuffer() {
return make_scoped_refptr(new AudioBuffer(
kUnknownSampleFormat, 1, 0, NULL, kNoTimestamp(), kNoTimestamp()));
@@ -111,9 +124,16 @@ void AudioBuffer::ReadFrames(int frames_to_copy,
// specified must be in range.
DCHECK(!end_of_stream());
DCHECK_EQ(dest->channels(), channel_count_);
+ source_frame_offset += trim_start_;
DCHECK_LE(source_frame_offset + frames_to_copy, frame_count_);
DCHECK_LE(dest_frame_offset + frames_to_copy, dest->frames());
+ if (!data_) {
+ // Special case for an empty buffer.
+ dest->ZeroFramesPartial(dest_frame_offset, frames_to_copy);
+ return;
+ }
+
if (sample_format_ == kSampleFormatPlanarF32) {
// Format is planar float32. Copy the data from each channel as a block.
for (int ch = 0; ch < channel_count_; ++ch) {
@@ -168,4 +188,20 @@ void AudioBuffer::ReadFrames(int frames_to_copy,
source_data, dest_frame_offset, frames_to_copy, bytes_per_channel);
}
+void AudioBuffer::TrimStart(int frames_to_trim) {
+ CHECK_LT(frames_to_trim, adjusted_frame_count_);
+ trim_start_ += frames_to_trim;
+
+ // Adjust timestamp_ and duration_ to reflect the smaller number of frames.
+ double offset = static_cast<double>(duration_.InMicroseconds()) *
+ frames_to_trim / adjusted_frame_count_;
+ base::TimeDelta offset_as_time =
+ base::TimeDelta::FromMicroseconds(static_cast<int64>(offset));
+ timestamp_ += offset_as_time;
+ duration_ -= offset_as_time;
+
+ // Finally adjust the number of frames in this buffer.
+ adjusted_frame_count_ = frame_count_ - trim_start_;
+}
+
} // namespace media
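The timestamp adjustment in TrimStart() is a linear proration of the remaining duration. A worked example, using the same setup as the Trim unit test further below (100 frames, 100 s total duration):

  // offset_us = duration_us * frames_to_trim / adjusted_frame_count
  //           = 100,000,000 * 10 / 100 = 10,000,000 us (10 s)
  // After TrimStart(10): timestamp() += 10 s, duration() == 90 s, and
  // frame_count() == 90. The int64 truncation in the cast is why the
  // header warns that repeated trims can drift by a few microseconds.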
diff --git a/media/base/audio_buffer.h b/media/base/audio_buffer.h
index b0a2ce7..9200666 100644
--- a/media/base/audio_buffer.h
+++ b/media/base/audio_buffer.h
@@ -37,13 +37,20 @@ class MEDIA_EXPORT AudioBuffer
const base::TimeDelta timestamp,
const base::TimeDelta duration);
+ // Create an empty AudioBuffer with |frame_count| frames.
+ static scoped_refptr<AudioBuffer> CreateEmptyBuffer(
+ int channel_count,
+ int frame_count,
+ const base::TimeDelta timestamp,
+ const base::TimeDelta duration);
+
// Create an AudioBuffer indicating we've reached end of stream.
// Calling any method other than end_of_stream() on the resulting buffer
// is disallowed.
static scoped_refptr<AudioBuffer> CreateEOSBuffer();
// Copy frames into |dest|. |frames_to_copy| is the number of frames to copy.
- // |source_frame_offset| specified how many frames in the buffer to skip
+ // |source_frame_offset| specifies how many frames in the buffer to skip
// first. |dest_frame_offset| is the frame offset in |dest|. The frames are
// converted from their source format into planar float32 data (which is all
// that AudioBus handles).
@@ -52,15 +59,29 @@ class MEDIA_EXPORT AudioBuffer
int dest_frame_offset,
AudioBus* dest);
+ // Trim an AudioBuffer by removing |frames_to_trim| frames from the start.
+ // Note that repeated calls to TrimStart() may result in timestamp() and
+ // duration() being off by a few microseconds due to rounding issues.
+ void TrimStart(int frames_to_trim);
+
+ // Return the number of channels.
+ int channel_count() const { return channel_count_; }
+
// Return the number of frames held.
- int frame_count() const { return frame_count_; }
+ int frame_count() const { return adjusted_frame_count_; }
// Access to constructor parameters.
base::TimeDelta timestamp() const { return timestamp_; }
base::TimeDelta duration() const { return duration_; }
+ // TODO(jrummell): Remove set_timestamp() and set_duration() once
+ // DecryptingAudioDecoder::EnqueueFrames() is changed to set them when
+ // creating the buffer. See http://crbug.com/255261.
+ void set_timestamp(base::TimeDelta timestamp) { timestamp_ = timestamp; }
+ void set_duration(base::TimeDelta duration) { duration_ = duration; }
+
// If there's no data in this buffer, it represents end of stream.
- bool end_of_stream() const { return data_ == NULL; }
+ bool end_of_stream() const { return end_of_stream_; }
private:
friend class base::RefCountedThreadSafe<AudioBuffer>;
@@ -78,9 +99,12 @@ class MEDIA_EXPORT AudioBuffer
virtual ~AudioBuffer();
- SampleFormat sample_format_;
- int channel_count_;
- int frame_count_;
+ const SampleFormat sample_format_;
+ const int channel_count_;
+ const int frame_count_;
+ int adjusted_frame_count_;
+ int trim_start_;
+ const bool end_of_stream_;
base::TimeDelta timestamp_;
base::TimeDelta duration_;
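A short usage sketch of the two factory methods, distinguishing a silent buffer from an end-of-stream marker (the argument values are illustrative):

  // 441 frames of silence: data is NULL but frame_count > 0, so this is
  // not an EOS buffer; ReadFrames() will zero-fill the destination.
  scoped_refptr<AudioBuffer> silence = AudioBuffer::CreateEmptyBuffer(
      2, 441, base::TimeDelta(), base::TimeDelta::FromMilliseconds(10));
  DCHECK(!silence->end_of_stream());

  // EOS marker: data is NULL and frame_count == 0.
  scoped_refptr<AudioBuffer> eos = AudioBuffer::CreateEOSBuffer();
  DCHECK(eos->end_of_stream());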
diff --git a/media/base/audio_buffer_queue.cc b/media/base/audio_buffer_queue.cc
index 3fa3775..abe8fce 100644
--- a/media/base/audio_buffer_queue.cc
+++ b/media/base/audio_buffer_queue.cc
@@ -26,8 +26,7 @@ void AudioBufferQueue::Clear() {
void AudioBufferQueue::Append(const scoped_refptr<AudioBuffer>& buffer_in) {
// If we have just written the first buffer, update |current_time_| to be the
// start time.
- if (buffers_.empty()) {
- DCHECK_EQ(frames_, 0);
+ if (buffers_.empty() && buffer_in->timestamp() != kNoTimestamp()) {
current_time_ = buffer_in->timestamp();
}
@@ -41,35 +40,40 @@ void AudioBufferQueue::Append(const scoped_refptr<AudioBuffer>& buffer_in) {
CHECK_GT(frames_, 0); // make sure it doesn't overflow.
}
-int AudioBufferQueue::ReadFrames(int frames, AudioBus* dest) {
- DCHECK_GE(dest->frames(), frames);
- return InternalRead(frames, true, 0, dest);
+int AudioBufferQueue::ReadFrames(int frames,
+ int dest_frame_offset,
+ AudioBus* dest) {
+ DCHECK_GE(dest->frames(), frames + dest_frame_offset);
+ return InternalRead(frames, true, 0, dest_frame_offset, dest);
}
int AudioBufferQueue::PeekFrames(int frames,
- int forward_offset,
+ int source_frame_offset,
+ int dest_frame_offset,
AudioBus* dest) {
DCHECK_GE(dest->frames(), frames);
- return InternalRead(frames, false, forward_offset, dest);
+ return InternalRead(
+ frames, false, source_frame_offset, dest_frame_offset, dest);
}
void AudioBufferQueue::SeekFrames(int frames) {
// Perform seek only if we have enough bytes in the queue.
CHECK_LE(frames, frames_);
- int taken = InternalRead(frames, true, 0, NULL);
+ int taken = InternalRead(frames, true, 0, 0, NULL);
DCHECK_EQ(taken, frames);
}
int AudioBufferQueue::InternalRead(int frames,
bool advance_position,
- int forward_offset,
+ int source_frame_offset,
+ int dest_frame_offset,
AudioBus* dest) {
// Counts how many frames are actually read from the buffer queue.
int taken = 0;
BufferQueue::iterator current_buffer = current_buffer_;
int current_buffer_offset = current_buffer_offset_;
- int frames_to_skip = forward_offset;
+ int frames_to_skip = source_frame_offset;
while (taken < frames) {
// |current_buffer| is valid since the first time this buffer is appended
// with data. Make sure there is data to be processed.
@@ -94,8 +98,10 @@ int AudioBufferQueue::InternalRead(int frames,
int copied = std::min(frames - taken, remaining_frames_in_buffer);
// if |dest| is NULL, there's no need to copy.
- if (dest)
- buffer->ReadFrames(copied, current_buffer_offset, taken, dest);
+ if (dest) {
+ buffer->ReadFrames(
+ copied, current_buffer_offset, dest_frame_offset + taken, dest);
+ }
// Increase total number of frames copied, which regulates when to end
// this loop.
@@ -131,14 +137,13 @@ int AudioBufferQueue::InternalRead(int frames,
DCHECK_GE(frames_, 0);
DCHECK(current_buffer_ != buffers_.end() || frames_ == 0);
- current_buffer_ = current_buffer;
- current_buffer_offset_ = current_buffer_offset;
-
- UpdateCurrentTime(current_buffer_, current_buffer_offset_);
+ UpdateCurrentTime(current_buffer, current_buffer_offset);
// Remove any buffers before the current buffer as there is no going
// backwards.
- buffers_.erase(buffers_.begin(), current_buffer_);
+ buffers_.erase(buffers_.begin(), current_buffer);
+ current_buffer_ = buffers_.begin();
+ current_buffer_offset_ = current_buffer_offset;
}
return taken;
diff --git a/media/base/audio_buffer_queue.h b/media/base/audio_buffer_queue.h
index 5ae9e21..58823f2 100644
--- a/media/base/audio_buffer_queue.h
+++ b/media/base/audio_buffer_queue.h
@@ -35,13 +35,20 @@ class MEDIA_EXPORT AudioBufferQueue {
// Reads a maximum of |frames| frames into |dest| from the current position.
// Returns the number of frames read. The current position will advance by the
- // amount of frames read.
- int ReadFrames(int frames, AudioBus* dest);
+ // amount of frames read. |dest_frame_offset| specifies a starting offset into
+ // |dest|. On each call, the frames are converted from their source format
+ // into the destination AudioBus.
+ int ReadFrames(int frames, int dest_frame_offset, AudioBus* dest);
// Copies up to |frames| frames from current position to |dest|. Returns
// number of frames copied. Doesn't advance current position. Starts at
- // |forward_offset| from current position.
- int PeekFrames(int frames, int forward_offset, AudioBus* dest);
+ // |source_frame_offset| from current position. |dest_frame_offset| specifies
+ // a starting offset into |dest|. On each call, the frames are converted from
+ // their source format into the destination AudioBus.
+ int PeekFrames(int frames,
+ int source_frame_offset,
+ int dest_frame_offset,
+ AudioBus* dest);
// Moves the current position forward by |frames| frames. If |frames| exceeds
// frames available, the seek operation will fail.
@@ -66,10 +73,12 @@ class MEDIA_EXPORT AudioBufferQueue {
// the number of frames read. The current position will be moved forward by
// the number of frames read if |advance_position| is set. If |dest| is NULL,
// only the current position will advance but no data will be copied.
- // |forward_offset| can be used to skip frames before reading.
+ // |source_frame_offset| can be used to skip frames before reading.
+ // |dest_frame_offset| specifies a starting offset into |dest|.
int InternalRead(int frames,
bool advance_position,
- int forward_offset,
+ int source_frame_offset,
+ int dest_frame_offset,
AudioBus* dest);
// Updates |current_time_| with the time that corresponds to the specified
// position in the queue.
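A sketch of how the new |dest_frame_offset| lets a caller assemble a single AudioBus from several reads (|queue| and the frame counts are hypothetical):

  scoped_ptr<AudioBus> bus = AudioBus::Create(2, 8);
  queue.ReadFrames(3, 0, bus.get());      // fills bus frames 0..2, advances
  queue.ReadFrames(5, 3, bus.get());      // fills bus frames 3..7, appended
  queue.PeekFrames(4, 0, 0, bus.get());   // copies without advancing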
diff --git a/media/base/audio_buffer_queue_unittest.cc b/media/base/audio_buffer_queue_unittest.cc
index 005c5ce..b95bdca 100644
--- a/media/base/audio_buffer_queue_unittest.cc
+++ b/media/base/audio_buffer_queue_unittest.cc
@@ -31,95 +31,98 @@ static void VerifyResult(float* channel_data,
TEST(AudioBufferQueueTest, AppendAndClear) {
const int channels = 1;
const int frames = 8;
- const base::TimeDelta start_time;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
EXPECT_EQ(0, buffer.frames());
buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, frames, start_time));
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(frames, buffer.frames());
buffer.Clear();
EXPECT_EQ(0, buffer.frames());
buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 20, 1, frames, start_time));
+ kSampleFormatU8, channels, 20, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(frames, buffer.frames());
}
TEST(AudioBufferQueueTest, MultipleAppend) {
const int channels = 1;
- const base::TimeDelta start_time;
+ const int frames = 8;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Append 40 frames in 5 buffers.
buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, 8, start_time));
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(8, buffer.frames());
buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, 8, start_time));
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(16, buffer.frames());
buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, 8, start_time));
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, 8, start_time));
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(32, buffer.frames());
buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 10, 1, 8, start_time));
+ kSampleFormatU8, channels, 10, 1, frames, kNoTime, kNoTime));
EXPECT_EQ(40, buffer.frames());
}
TEST(AudioBufferQueueTest, IteratorCheck) {
const int channels = 1;
- const base::TimeDelta start_time;
+ const int frames = 8;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
// Append 40 frames in 5 buffers. Intersperse ReadFrames() to make sure the
// iterator is pointing to the correct position.
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 10.0f, 1.0f, 8, start_time));
+ kSampleFormatF32, channels, 10.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(8, buffer.frames());
- EXPECT_EQ(4, buffer.ReadFrames(4, bus.get()));
+ EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
EXPECT_EQ(4, buffer.frames());
VerifyResult(bus->channel(0), 4, 10.0f, 1.0f);
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 20.0f, 1.0f, 8, start_time));
+ kSampleFormatF32, channels, 20.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(12, buffer.frames());
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 30.0f, 1.0f, 8, start_time));
+ kSampleFormatF32, channels, 30.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(20, buffer.frames());
buffer.SeekFrames(16);
- EXPECT_EQ(4, buffer.ReadFrames(4, bus.get()));
+ EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
EXPECT_EQ(0, buffer.frames());
VerifyResult(bus->channel(0), 4, 34.0f, 1.0f);
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 40.0f, 1.0f, 8, start_time));
+ kSampleFormatF32, channels, 40.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(8, buffer.frames());
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 50.0f, 1.0f, 8, start_time));
+ kSampleFormatF32, channels, 50.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(16, buffer.frames());
- EXPECT_EQ(4, buffer.ReadFrames(4, bus.get()));
+ EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
VerifyResult(bus->channel(0), 4, 40.0f, 1.0f);
// Read off the end of the buffer.
EXPECT_EQ(12, buffer.frames());
buffer.SeekFrames(8);
- EXPECT_EQ(4, buffer.ReadFrames(100, bus.get()));
+ EXPECT_EQ(4, buffer.ReadFrames(100, 0, bus.get()));
VerifyResult(bus->channel(0), 4, 54.0f, 1.0f);
}
TEST(AudioBufferQueueTest, Seek) {
const int channels = 2;
- const base::TimeDelta start_time;
+ const int frames = 6;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Add 6 frames of data.
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 1.0f, 1.0f, 6, start_time));
+ kSampleFormatF32, channels, 1.0f, 1.0f, frames, kNoTime, kNoTime));
EXPECT_EQ(6, buffer.frames());
// Seek past 2 frames.
@@ -136,57 +139,58 @@ TEST(AudioBufferQueueTest, Seek) {
TEST(AudioBufferQueueTest, ReadF32) {
const int channels = 2;
- const base::TimeDelta start_time;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Add 76 frames of data.
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 1.0f, 1.0f, 6, start_time));
+ kSampleFormatF32, channels, 1.0f, 1.0f, 6, kNoTime, kNoTime));
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 13.0f, 1.0f, 10, start_time));
+ kSampleFormatF32, channels, 13.0f, 1.0f, 10, kNoTime, kNoTime));
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 33.0f, 1.0f, 60, start_time));
+ kSampleFormatF32, channels, 33.0f, 1.0f, 60, kNoTime, kNoTime));
EXPECT_EQ(76, buffer.frames());
// Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be
// 1, 3, 5, and ch[1] should be 2, 4, 6.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(3, buffer.ReadFrames(3, bus.get()));
+ EXPECT_EQ(3, buffer.ReadFrames(3, 0, bus.get()));
EXPECT_EQ(73, buffer.frames());
VerifyResult(bus->channel(0), 3, 1.0f, 2.0f);
VerifyResult(bus->channel(1), 3, 2.0f, 2.0f);
- // Now read 5 frames, which will span buffers.
- EXPECT_EQ(5, buffer.ReadFrames(5, bus.get()));
+ // Now read 5 frames, which will span buffers. Append the data into AudioBus.
+ EXPECT_EQ(5, buffer.ReadFrames(5, 3, bus.get()));
EXPECT_EQ(68, buffer.frames());
- VerifyResult(bus->channel(0), 5, 7.0f, 2.0f);
- VerifyResult(bus->channel(1), 5, 8.0f, 2.0f);
+ VerifyResult(bus->channel(0), 8, 1.0f, 2.0f);
+ VerifyResult(bus->channel(1), 8, 2.0f, 2.0f);
// Now skip into the third buffer.
buffer.SeekFrames(20);
EXPECT_EQ(48, buffer.frames());
// Now read 2 frames, which are in the third buffer.
- EXPECT_EQ(2, buffer.ReadFrames(2, bus.get()));
+ EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
VerifyResult(bus->channel(0), 2, 57.0f, 2.0f);
VerifyResult(bus->channel(1), 2, 58.0f, 2.0f);
}
TEST(AudioBufferQueueTest, ReadU8) {
const int channels = 4;
- const base::TimeDelta start_time;
+ const int frames = 4;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Add 4 frames of data.
buffer.Append(MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 128, 1, 4, start_time));
+ kSampleFormatU8, channels, 128, 1, frames, kNoTime, kNoTime));
// Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
// 128, 132, 136, 140, other channels similar. However, values are converted
// from [0, 255] to [-1.0, 1.0] with a bias of 128. Thus the first buffer
// value should be 0.0, then 1/127, 2/127, etc.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(4, buffer.ReadFrames(4, bus.get()));
+ EXPECT_EQ(4, buffer.ReadFrames(4, 0, bus.get()));
EXPECT_EQ(0, buffer.frames());
VerifyResult(bus->channel(0), 4, 0.0f, 4.0f / 127.0f);
VerifyResult(bus->channel(1), 4, 1.0f / 127.0f, 4.0f / 127.0f);
@@ -196,21 +200,21 @@ TEST(AudioBufferQueueTest, ReadU8) {
TEST(AudioBufferQueueTest, ReadS16) {
const int channels = 2;
- const base::TimeDelta start_time;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Add 24 frames of data.
buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, 4, start_time));
+ kSampleFormatS16, channels, 1, 1, 4, kNoTime, kNoTime));
buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 9, 1, 20, start_time));
+ kSampleFormatS16, channels, 9, 1, 20, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be
// 1, 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12.
// Data is converted to float from -1.0 to 1.0 based on int16 range.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(6, buffer.ReadFrames(6, bus.get()));
+ EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
EXPECT_EQ(18, buffer.frames());
VerifyResult(bus->channel(0), 6, 1.0f / kint16max, 2.0f / kint16max);
VerifyResult(bus->channel(1), 6, 2.0f / kint16max, 2.0f / kint16max);
@@ -218,27 +222,27 @@ TEST(AudioBufferQueueTest, ReadS16) {
TEST(AudioBufferQueueTest, ReadS32) {
const int channels = 2;
- const base::TimeDelta start_time;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Add 24 frames of data.
buffer.Append(MakeInterleavedAudioBuffer<int32>(
- kSampleFormatS32, channels, 1, 1, 4, start_time));
+ kSampleFormatS32, channels, 1, 1, 4, kNoTime, kNoTime));
buffer.Append(MakeInterleavedAudioBuffer<int32>(
- kSampleFormatS32, channels, 9, 1, 20, start_time));
+ kSampleFormatS32, channels, 9, 1, 20, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be
// 1, 3, 5, 7, 100, 106, and ch[1] should be 2, 4, 6, 8, 103, 109.
// Data is converted to float from -1.0 to 1.0 based on int32 range.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(6, buffer.ReadFrames(6, bus.get()));
+ EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
EXPECT_EQ(18, buffer.frames());
VerifyResult(bus->channel(0), 6, 1.0f / kint32max, 2.0f / kint32max);
VerifyResult(bus->channel(1), 6, 2.0f / kint32max, 2.0f / kint32max);
// Read the next 2 frames.
- EXPECT_EQ(2, buffer.ReadFrames(2, bus.get()));
+ EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
EXPECT_EQ(16, buffer.frames());
VerifyResult(bus->channel(0), 2, 13.0f / kint32max, 2.0f / kint32max);
VerifyResult(bus->channel(1), 2, 14.0f / kint32max, 2.0f / kint32max);
@@ -246,20 +250,20 @@ TEST(AudioBufferQueueTest, ReadS32) {
TEST(AudioBufferQueueTest, ReadF32Planar) {
const int channels = 2;
- const base::TimeDelta start_time;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Add 14 frames of data.
buffer.Append(MakePlanarAudioBuffer<float>(
- kSampleFormatPlanarF32, channels, 1.0f, 1.0f, 4, start_time));
+ kSampleFormatPlanarF32, channels, 1.0f, 1.0f, 4, kNoTime, kNoTime));
buffer.Append(MakePlanarAudioBuffer<float>(
- kSampleFormatPlanarF32, channels, 50.0f, 1.0f, 10, start_time));
+ kSampleFormatPlanarF32, channels, 50.0f, 1.0f, 10, kNoTime, kNoTime));
EXPECT_EQ(14, buffer.frames());
// Read 6 frames from the buffer. F32 is planar, so ch[0] should be
// 1, 2, 3, 4, 50, 51, and ch[1] should be 5, 6, 7, 8, 60, 61.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(6, buffer.ReadFrames(6, bus.get()));
+ EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
EXPECT_EQ(8, buffer.frames());
VerifyResult(bus->channel(0), 4, 1.0f, 1.0f);
VerifyResult(bus->channel(0) + 4, 2, 50.0f, 1.0f);
@@ -269,21 +273,21 @@ TEST(AudioBufferQueueTest, ReadF32Planar) {
TEST(AudioBufferQueueTest, ReadS16Planar) {
const int channels = 2;
- const base::TimeDelta start_time;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Add 24 frames of data.
buffer.Append(MakePlanarAudioBuffer<int16>(
- kSampleFormatPlanarS16, channels, 1, 1, 4, start_time));
+ kSampleFormatPlanarS16, channels, 1, 1, 4, kNoTime, kNoTime));
buffer.Append(MakePlanarAudioBuffer<int16>(
- kSampleFormatPlanarS16, channels, 100, 5, 20, start_time));
+ kSampleFormatPlanarS16, channels, 100, 5, 20, kNoTime, kNoTime));
EXPECT_EQ(24, buffer.frames());
// Read 6 frames from the buffer. Data is planar, so ch[0] should be
// 1, 2, 3, 4, 100, 105, and ch[1] should be 5, 6, 7, 8, 200, 205.
// Data is converted to float from -1.0 to 1.0 based on int16 range.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(6, buffer.ReadFrames(6, bus.get()));
+ EXPECT_EQ(6, buffer.ReadFrames(6, 0, bus.get()));
EXPECT_EQ(18, buffer.frames());
VerifyResult(bus->channel(0), 4, 1.0f / kint16max, 1.0f / kint16max);
VerifyResult(bus->channel(0) + 4, 2, 100.0f / kint16max, 5.0f / kint16max);
@@ -293,22 +297,27 @@ TEST(AudioBufferQueueTest, ReadS16Planar) {
TEST(AudioBufferQueueTest, ReadManyChannels) {
const int channels = 16;
- const base::TimeDelta start_time;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Add 76 frames of data.
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 0.0f, 1.0f, 6, start_time));
- buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 6.0f * channels, 1.0f, 10, start_time));
+ kSampleFormatF32, channels, 0.0f, 1.0f, 6, kNoTime, kNoTime));
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 16.0f * channels, 1.0f, 60, start_time));
+ kSampleFormatF32, channels, 6.0f * channels, 1.0f, 10, kNoTime, kNoTime));
+ buffer.Append(MakeInterleavedAudioBuffer<float>(kSampleFormatF32,
+ channels,
+ 16.0f * channels,
+ 1.0f,
+ 60,
+ kNoTime,
+ kNoTime));
EXPECT_EQ(76, buffer.frames());
// Read 3 frames from the buffer. F32 is interleaved, so ch[0] should be
// 1, 17, 33, and ch[1] should be 2, 18, 34. Just check a few channels.
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
- EXPECT_EQ(30, buffer.ReadFrames(30, bus.get()));
+ EXPECT_EQ(30, buffer.ReadFrames(30, 0, bus.get()));
EXPECT_EQ(46, buffer.frames());
for (int i = 0; i < channels; ++i) {
VerifyResult(bus->channel(i), 30, static_cast<float>(i), 16.0f);
@@ -317,24 +326,24 @@ TEST(AudioBufferQueueTest, ReadManyChannels) {
TEST(AudioBufferQueueTest, Peek) {
const int channels = 4;
- const base::TimeDelta start_time;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
// Add 60 frames of data.
buffer.Append(MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 0.0f, 1.0f, 60, start_time));
+ kSampleFormatF32, channels, 0.0f, 1.0f, 60, kNoTime, kNoTime));
EXPECT_EQ(60, buffer.frames());
// Peek at the first 30 frames.
scoped_ptr<AudioBus> bus1 = AudioBus::Create(channels, 100);
EXPECT_EQ(60, buffer.frames());
- EXPECT_EQ(60, buffer.PeekFrames(100, 0, bus1.get())); // only 60 in buffer.
- EXPECT_EQ(30, buffer.PeekFrames(30, 0, bus1.get())); // should get first 30.
+ EXPECT_EQ(60, buffer.PeekFrames(100, 0, 0, bus1.get()));
+ EXPECT_EQ(30, buffer.PeekFrames(30, 0, 0, bus1.get()));
EXPECT_EQ(60, buffer.frames());
// Now read the next 30 frames (which should be the same as those peeked at).
scoped_ptr<AudioBus> bus2 = AudioBus::Create(channels, 100);
- EXPECT_EQ(30, buffer.ReadFrames(30, bus2.get()));
+ EXPECT_EQ(30, buffer.ReadFrames(30, 0, bus2.get()));
for (int i = 0; i < channels; ++i) {
VerifyResult(bus1->channel(i),
30,
@@ -347,7 +356,7 @@ TEST(AudioBufferQueueTest, Peek) {
}
// Peek 10 frames forward
- EXPECT_EQ(5, buffer.PeekFrames(5, 10, bus1.get()));
+ EXPECT_EQ(5, buffer.PeekFrames(5, 10, 0, bus1.get()));
for (int i = 0; i < channels; ++i) {
VerifyResult(bus1->channel(i),
5,
@@ -357,14 +366,15 @@ TEST(AudioBufferQueueTest, Peek) {
// Peek to the end of the buffer.
EXPECT_EQ(30, buffer.frames());
- EXPECT_EQ(30, buffer.PeekFrames(100, 0, bus1.get()));
- EXPECT_EQ(30, buffer.PeekFrames(30, 0, bus1.get()));
+ EXPECT_EQ(30, buffer.PeekFrames(100, 0, 0, bus1.get()));
+ EXPECT_EQ(30, buffer.PeekFrames(30, 0, 0, bus1.get()));
}
TEST(AudioBufferQueueTest, Time) {
const int channels = 2;
const base::TimeDelta start_time1;
const base::TimeDelta start_time2 = base::TimeDelta::FromSeconds(30);
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(10);
AudioBufferQueue buffer;
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
@@ -372,14 +382,14 @@ TEST(AudioBufferQueueTest, Time) {
// first: start=0s, duration=10s
// second: start=30s, duration=10s
buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, 10, start_time1));
+ kSampleFormatS16, channels, 1, 1, 10, start_time1, duration));
EXPECT_EQ(10, buffer.frames());
// Check starting time.
EXPECT_EQ(start_time1, buffer.current_time());
// Read 2 frames, should be 2s in (since duration is 1s per sample).
- EXPECT_EQ(2, buffer.ReadFrames(2, bus.get()));
+ EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(2),
buffer.current_time());
@@ -390,67 +400,68 @@ TEST(AudioBufferQueueTest, Time) {
// Add second buffer for more data.
buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, 10, start_time2));
+ kSampleFormatS16, channels, 1, 1, 10, start_time2, duration));
EXPECT_EQ(16, buffer.frames());
// Read until almost the end of buffer1.
- EXPECT_EQ(5, buffer.ReadFrames(5, bus.get()));
+ EXPECT_EQ(5, buffer.ReadFrames(5, 0, bus.get()));
EXPECT_EQ(start_time1 + base::TimeDelta::FromSeconds(9),
buffer.current_time());
// Read 1 value, so time moved to buffer2.
- EXPECT_EQ(1, buffer.ReadFrames(1, bus.get()));
+ EXPECT_EQ(1, buffer.ReadFrames(1, 0, bus.get()));
EXPECT_EQ(start_time2, buffer.current_time());
// Read all 10 frames in buffer2, timestamp should be last time from buffer2.
- EXPECT_EQ(10, buffer.ReadFrames(10, bus.get()));
+ EXPECT_EQ(10, buffer.ReadFrames(10, 0, bus.get()));
EXPECT_EQ(start_time2 + base::TimeDelta::FromSeconds(10),
buffer.current_time());
// Try to read more frames (which don't exist), timestamp should remain.
- EXPECT_EQ(0, buffer.ReadFrames(5, bus.get()));
+ EXPECT_EQ(0, buffer.ReadFrames(5, 0, bus.get()));
EXPECT_EQ(start_time2 + base::TimeDelta::FromSeconds(10),
buffer.current_time());
}
TEST(AudioBufferQueueTest, NoTime) {
const int channels = 2;
+ const base::TimeDelta kNoTime = kNoTimestamp();
AudioBufferQueue buffer;
scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
// Add two buffers with no timestamps. Time should always be unknown.
buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, 10, kNoTimestamp()));
+ kSampleFormatS16, channels, 1, 1, 10, kNoTime, kNoTime));
buffer.Append(MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, 10, kNoTimestamp()));
+ kSampleFormatS16, channels, 1, 1, 10, kNoTime, kNoTime));
EXPECT_EQ(20, buffer.frames());
// Check starting time.
- EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+ EXPECT_EQ(kNoTime, buffer.current_time());
// Read 2 frames.
- EXPECT_EQ(2, buffer.ReadFrames(2, bus.get()));
- EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+ EXPECT_EQ(2, buffer.ReadFrames(2, 0, bus.get()));
+ EXPECT_EQ(kNoTime, buffer.current_time());
// Skip 2 frames.
buffer.SeekFrames(2);
- EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+ EXPECT_EQ(kNoTime, buffer.current_time());
// Read until almost the end of buffer1.
- EXPECT_EQ(5, buffer.ReadFrames(5, bus.get()));
- EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+ EXPECT_EQ(5, buffer.ReadFrames(5, 0, bus.get()));
+ EXPECT_EQ(kNoTime, buffer.current_time());
// Read 1 value, so time moved to buffer2.
- EXPECT_EQ(1, buffer.ReadFrames(1, bus.get()));
- EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+ EXPECT_EQ(1, buffer.ReadFrames(1, 0, bus.get()));
+ EXPECT_EQ(kNoTime, buffer.current_time());
// Read all 10 frames in buffer2.
- EXPECT_EQ(10, buffer.ReadFrames(10, bus.get()));
- EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+ EXPECT_EQ(10, buffer.ReadFrames(10, 0, bus.get()));
+ EXPECT_EQ(kNoTime, buffer.current_time());
// Try to read more frames (which don't exist), timestamp should remain.
- EXPECT_EQ(0, buffer.ReadFrames(5, bus.get()));
- EXPECT_EQ(kNoTimestamp(), buffer.current_time());
+ EXPECT_EQ(0, buffer.ReadFrames(5, 0, bus.get()));
+ EXPECT_EQ(kNoTime, buffer.current_time());
}
} // namespace media
diff --git a/media/base/audio_buffer_unittest.cc b/media/base/audio_buffer_unittest.cc
index 1c01354..f6384e8 100644
--- a/media/base/audio_buffer_unittest.cc
+++ b/media/base/audio_buffer_unittest.cc
@@ -27,8 +27,9 @@ TEST(AudioBufferTest, CopyFrom) {
const int channels = 1;
const int frames = 8;
const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 1, 1, frames, start_time);
+ kSampleFormatU8, channels, 1, 1, frames, start_time, duration);
EXPECT_EQ(frames, buffer->frame_count());
EXPECT_EQ(buffer->timestamp(), start_time);
EXPECT_EQ(buffer->duration().InSeconds(), frames);
@@ -61,8 +62,9 @@ TEST(AudioBufferTest, ReadU8) {
const int channels = 4;
const int frames = 4;
const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<uint8>(
- kSampleFormatU8, channels, 128, 1, frames, start_time);
+ kSampleFormatU8, channels, 128, 1, frames, start_time, duration);
// Read all 4 frames from the buffer. Data is interleaved, so ch[0] should be
// 128, 132, 136, 140, other channels similar. However, values are converted
@@ -80,8 +82,9 @@ TEST(AudioBufferTest, ReadS16) {
const int channels = 2;
const int frames = 10;
const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int16>(
- kSampleFormatS16, channels, 1, 1, frames, start_time);
+ kSampleFormatS16, channels, 1, 1, frames, start_time, duration);
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
// 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. Data is converted
@@ -104,8 +107,9 @@ TEST(AudioBufferTest, ReadS32) {
const int channels = 2;
const int frames = 6;
const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int32>(
- kSampleFormatS32, channels, 1, 1, frames, start_time);
+ kSampleFormatS32, channels, 1, 1, frames, start_time, duration);
// Read 6 frames from the buffer. Data is interleaved, so ch[0] should be 1,
// 3, 5, 7, 9, 11, and ch[1] should be 2, 4, 6, 8, 10, 12. Data is converted
@@ -126,8 +130,9 @@ TEST(AudioBufferTest, ReadF32) {
const int channels = 2;
const int frames = 20;
const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
- kSampleFormatF32, channels, 1.0f, 1.0f, frames, start_time);
+ kSampleFormatF32, channels, 1.0f, 1.0f, frames, start_time, duration);
// Read first 10 frames from the buffer. F32 is interleaved, so ch[0] should
// be 1, 3, 5, ... and ch[1] should be 2, 4, 6, ...
@@ -147,8 +152,9 @@ TEST(AudioBufferTest, ReadS16Planar) {
const int channels = 2;
const int frames = 20;
const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
scoped_refptr<AudioBuffer> buffer = MakePlanarAudioBuffer<int16>(
- kSampleFormatPlanarS16, channels, 1, 1, frames, start_time);
+ kSampleFormatPlanarS16, channels, 1, 1, frames, start_time, duration);
// Read 6 frames from the buffer. Data is planar, so ch[0] should be 1, 2, 3,
// 4, 5, 6, and ch[1] should be 21, 22, 23, 24, 25, 26. Data is converted to
@@ -179,8 +185,15 @@ TEST(AudioBufferTest, ReadF32Planar) {
const int channels = 4;
const int frames = 100;
const base::TimeDelta start_time;
- scoped_refptr<AudioBuffer> buffer = MakePlanarAudioBuffer<float>(
- kSampleFormatPlanarF32, channels, 1.0f, 1.0f, frames, start_time);
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer =
+ MakePlanarAudioBuffer<float>(kSampleFormatPlanarF32,
+ channels,
+ 1.0f,
+ 1.0f,
+ frames,
+ start_time,
+ duration);
// Read all 100 frames from the buffer. F32 is planar, so ch[0] should be 1,
// 2, 3, 4, ..., ch[1] should be 101, 102, 103, ..., and so on for all 4
@@ -201,4 +214,63 @@ TEST(AudioBufferTest, ReadF32Planar) {
VerifyResult(bus->channel(3), 20, 351.0f, 1.0f);
}
+TEST(AudioBufferTest, EmptyBuffer) {
+ const int channels = 4;
+ const int frames = 100;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateEmptyBuffer(
+ channels, frames, start_time, duration);
+ EXPECT_EQ(frames, buffer->frame_count());
+ EXPECT_EQ(start_time, buffer->timestamp());
+ EXPECT_EQ(frames, buffer->duration().InSeconds());
+ EXPECT_FALSE(buffer->end_of_stream());
+
+ // Read all 100 frames from the buffer. All data should be 0.
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), frames, 0.0f, 0.0f);
+ VerifyResult(bus->channel(1), frames, 0.0f, 0.0f);
+ VerifyResult(bus->channel(2), frames, 0.0f, 0.0f);
+ VerifyResult(bus->channel(3), frames, 0.0f, 0.0f);
+}
+
+TEST(AudioBufferTest, Trim) {
+ const int channels = 4;
+ const int frames = 100;
+ const base::TimeDelta start_time;
+ const base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
+ scoped_refptr<AudioBuffer> buffer =
+ MakePlanarAudioBuffer<float>(kSampleFormatPlanarF32,
+ channels,
+ 1.0f,
+ 1.0f,
+ frames,
+ start_time,
+ duration);
+ EXPECT_EQ(frames, buffer->frame_count());
+ EXPECT_EQ(start_time, buffer->timestamp());
+ EXPECT_EQ(frames, buffer->duration().InSeconds());
+
+ scoped_ptr<AudioBus> bus = AudioBus::Create(channels, 100);
+ buffer->ReadFrames(20, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 20, 1.0f, 1.0f);
+
+ // Trim off 10 frames.
+ buffer->TrimStart(10);
+ EXPECT_EQ(buffer->frame_count(), frames - 10);
+ EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(10));
+ EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(90));
+ buffer->ReadFrames(20, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 20, 11.0f, 1.0f);
+
+ // Trim off 80 more.
+ buffer->TrimStart(80);
+ EXPECT_EQ(buffer->frame_count(), frames - 90);
+ EXPECT_EQ(buffer->timestamp(), start_time + base::TimeDelta::FromSeconds(90));
+ EXPECT_EQ(buffer->duration(), base::TimeDelta::FromSeconds(10));
+ buffer->ReadFrames(10, 0, 0, bus.get());
+ VerifyResult(bus->channel(0), 10, 91.0f, 1.0f);
+}
+
} // namespace media
diff --git a/media/base/audio_bus.cc b/media/base/audio_bus.cc
index a53fdaf..518d83c 100644
--- a/media/base/audio_bus.cc
+++ b/media/base/audio_bus.cc
@@ -299,13 +299,24 @@ void AudioBus::ToInterleavedPartial(int start_frame, int frames,
}
void AudioBus::CopyTo(AudioBus* dest) const {
+ CopyPartialFramesTo(0, frames(), 0, dest);
+}
+
+void AudioBus::CopyPartialFramesTo(int source_start_frame,
+ int frame_count,
+ int dest_start_frame,
+ AudioBus* dest) const {
CHECK_EQ(channels(), dest->channels());
- CHECK_EQ(frames(), dest->frames());
+ CHECK_LE(source_start_frame + frame_count, frames());
+ CHECK_LE(dest_start_frame + frame_count, dest->frames());
// Since we don't know if the other AudioBus is wrapped or not (and we don't
// want to care), just copy using the public channel() accessors.
- for (int i = 0; i < channels(); ++i)
- memcpy(dest->channel(i), channel(i), sizeof(*channel(i)) * frames());
+ for (int i = 0; i < channels(); ++i) {
+ memcpy(dest->channel(i) + dest_start_frame,
+ channel(i) + source_start_frame,
+ sizeof(*channel(i)) * frame_count);
+ }
}
void AudioBus::Scale(float volume) {
diff --git a/media/base/audio_bus.h b/media/base/audio_bus.h
index 61a53ed..dbb49ca 100644
--- a/media/base/audio_bus.h
+++ b/media/base/audio_bus.h
@@ -76,6 +76,15 @@ class MEDIA_EXPORT AudioBus {
// AudioBus object must have the same frames() and channels().
void CopyTo(AudioBus* dest) const;
+ // Helper method to copy frames from one AudioBus to another. Both AudioBus
+ // objects must have the same number of channels(). |source_start_frame| is
+ // the starting offset. |dest_start_frame| is the starting offset in |dest|.
+ // |frame_count| is the number of frames to copy.
+ void CopyPartialFramesTo(int source_start_frame,
+ int frame_count,
+ int dest_start_frame,
+ AudioBus* dest) const;
+
// Returns a raw pointer to the requested channel. Pointer is guaranteed to
// have a 16-byte alignment. Warning: Do not rely on having sane (i.e. not
// inf, nan, or between [-1.0, 1.0]) values in the channel data.
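A usage sketch (the buses and counts are illustrative); CopyTo() is now simply the full-range case:

  // Copy 10 frames starting at source frame 5 into |dest| at frame 20.
  source->CopyPartialFramesTo(5, 10, 20, dest.get());

  // CopyTo(dest.get()) is implemented as:
  source->CopyPartialFramesTo(0, source->frames(), 0, dest.get());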
diff --git a/media/base/audio_decoder.h b/media/base/audio_decoder.h
index c88aab3..aa2eeb8 100644
--- a/media/base/audio_decoder.h
+++ b/media/base/audio_decoder.h
@@ -13,7 +13,7 @@
namespace media {
-class DataBuffer;
+class AudioBuffer;
class DemuxerStream;
class MEDIA_EXPORT AudioDecoder {
@@ -45,7 +45,8 @@ class MEDIA_EXPORT AudioDecoder {
// indicate the end of the stream. A NULL buffer pointer indicates an aborted
// Read(). This can happen if the DemuxerStream gets flushed and doesn't have
// any more data to return.
- typedef base::Callback<void(Status, const scoped_refptr<DataBuffer>&)> ReadCB;
+ typedef base::Callback<void(Status, const scoped_refptr<AudioBuffer>&)>
+ ReadCB;
virtual void Read(const ReadCB& read_cb) = 0;
// Reset decoder state, dropping any queued encoded data.
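A hypothetical consumer of the retyped callback, assuming the decoder's existing kOk status value:

  void OnDecoderRead(AudioDecoder::Status status,
                     const scoped_refptr<AudioBuffer>& buffer) {
    if (status != AudioDecoder::kOk || !buffer)
      return;  // Decode error or aborted Read().
    if (buffer->end_of_stream())
      return;  // No more data coming.
    splicer_->AddInput(buffer);  // Frame-based from here on.
  }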
diff --git a/media/base/audio_splicer.cc b/media/base/audio_splicer.cc
index 206a858..14b4199 100644
--- a/media/base/audio_splicer.cc
+++ b/media/base/audio_splicer.cc
@@ -7,10 +7,10 @@
#include <cstdlib>
#include "base/logging.h"
+#include "media/base/audio_buffer.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/buffers.h"
-#include "media/base/data_buffer.h"
namespace media {
@@ -20,9 +20,9 @@ namespace media {
// roughly represents the duration of 2 compressed AAC or MP3 frames.
static const int kMaxTimeDeltaInMilliseconds = 50;
-AudioSplicer::AudioSplicer(int bytes_per_frame, int samples_per_second)
- : output_timestamp_helper_(bytes_per_frame, samples_per_second),
- min_gap_size_(2 * bytes_per_frame),
+AudioSplicer::AudioSplicer(int samples_per_second)
+ : output_timestamp_helper_(samples_per_second),
+ min_gap_size_(2),
received_end_of_stream_(false) {
}
@@ -35,7 +35,7 @@ void AudioSplicer::Reset() {
received_end_of_stream_ = false;
}
-bool AudioSplicer::AddInput(const scoped_refptr<DataBuffer>& input){
+bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
DCHECK(!received_end_of_stream_ || input->end_of_stream());
if (input->end_of_stream()) {
@@ -46,7 +46,7 @@ bool AudioSplicer::AddInput(const scoped_refptr<DataBuffer>& input){
DCHECK(input->timestamp() != kNoTimestamp());
DCHECK(input->duration() > base::TimeDelta());
- DCHECK_GT(input->data_size(), 0);
+ DCHECK_GT(input->frame_count(), 0);
if (output_timestamp_helper_.base_timestamp() == kNoTimestamp())
output_timestamp_helper_.SetBaseTimestamp(input->timestamp());
@@ -65,26 +65,26 @@ bool AudioSplicer::AddInput(const scoped_refptr<DataBuffer>& input){
return false;
}
- int bytes_to_fill = 0;
+ int frames_to_fill = 0;
if (delta != base::TimeDelta())
- bytes_to_fill = output_timestamp_helper_.GetBytesToTarget(timestamp);
+ frames_to_fill = output_timestamp_helper_.GetFramesToTarget(timestamp);
- if (bytes_to_fill == 0 || std::abs(bytes_to_fill) < min_gap_size_) {
+ if (frames_to_fill == 0 || std::abs(frames_to_fill) < min_gap_size_) {
AddOutputBuffer(input);
return true;
}
- if (bytes_to_fill > 0) {
+ if (frames_to_fill > 0) {
DVLOG(1) << "Gap detected @ " << expected_timestamp.InMicroseconds()
<< " us: " << delta.InMicroseconds() << " us";
// Create a buffer with enough silence samples to fill the gap and
// add it to the output buffer.
- scoped_refptr<DataBuffer> gap = new DataBuffer(bytes_to_fill);
- gap->set_data_size(bytes_to_fill);
- memset(gap->writable_data(), 0, bytes_to_fill);
- gap->set_timestamp(expected_timestamp);
- gap->set_duration(output_timestamp_helper_.GetDuration(bytes_to_fill));
+ scoped_refptr<AudioBuffer> gap = AudioBuffer::CreateEmptyBuffer(
+ input->channel_count(),
+ frames_to_fill,
+ expected_timestamp,
+ output_timestamp_helper_.GetFrameDuration(frames_to_fill));
AddOutputBuffer(gap);
// Add the input buffer now that the gap has been filled.
@@ -92,12 +92,12 @@ bool AudioSplicer::AddInput(const scoped_refptr<DataBuffer>& input){
return true;
}
- int bytes_to_skip = -bytes_to_fill;
+ int frames_to_skip = -frames_to_fill;
DVLOG(1) << "Overlap detected @ " << expected_timestamp.InMicroseconds()
<< " us: " << -delta.InMicroseconds() << " us";
- if (input->data_size() <= bytes_to_skip) {
+ if (input->frame_count() <= frames_to_skip) {
DVLOG(1) << "Dropping whole buffer";
return true;
}
@@ -107,17 +107,8 @@ bool AudioSplicer::AddInput(const scoped_refptr<DataBuffer>& input){
//
// TODO(acolwell): Implement a cross-fade here so the transition is less
// jarring.
- int new_buffer_size = input->data_size() - bytes_to_skip;
-
- scoped_refptr<DataBuffer> new_buffer = new DataBuffer(new_buffer_size);
- new_buffer->set_data_size(new_buffer_size);
- memcpy(new_buffer->writable_data(),
- input->data() + bytes_to_skip,
- new_buffer_size);
- new_buffer->set_timestamp(expected_timestamp);
- new_buffer->set_duration(
- output_timestamp_helper_.GetDuration(new_buffer_size));
- AddOutputBuffer(new_buffer);
+ input->TrimStart(frames_to_skip);
+ AddOutputBuffer(input);
return true;
}
@@ -125,14 +116,14 @@ bool AudioSplicer::HasNextBuffer() const {
return !output_buffers_.empty();
}
-scoped_refptr<DataBuffer> AudioSplicer::GetNextBuffer() {
- scoped_refptr<DataBuffer> ret = output_buffers_.front();
+scoped_refptr<AudioBuffer> AudioSplicer::GetNextBuffer() {
+ scoped_refptr<AudioBuffer> ret = output_buffers_.front();
output_buffers_.pop_front();
return ret;
}
-void AudioSplicer::AddOutputBuffer(const scoped_refptr<DataBuffer>& buffer) {
- output_timestamp_helper_.AddBytes(buffer->data_size());
+void AudioSplicer::AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer) {
+ output_timestamp_helper_.AddFrames(buffer->frame_count());
output_buffers_.push_back(buffer);
}
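The gap/overlap handling in AddInput() reduces to three cases, paraphrased here in frame units (min_gap_size_ is 2 frames, from the constructor):

  int frames_to_fill = output_timestamp_helper_.GetFramesToTarget(timestamp);
  if (frames_to_fill == 0 || std::abs(frames_to_fill) < min_gap_size_) {
    AddOutputBuffer(input);  // Close enough: pass through unmodified.
  } else if (frames_to_fill > 0) {
    // Gap: synthesize silence, then pass the input through.
    AddOutputBuffer(AudioBuffer::CreateEmptyBuffer(
        input->channel_count(), frames_to_fill, expected_timestamp,
        output_timestamp_helper_.GetFrameDuration(frames_to_fill)));
    AddOutputBuffer(input);
  } else {
    // Overlap: drop the overlapping leading frames in place instead of
    // copying bytes into a new buffer (wholly-overlapped inputs were
    // already dropped above).
    input->TrimStart(-frames_to_fill);
    AddOutputBuffer(input);
  }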
diff --git a/media/base/audio_splicer.h b/media/base/audio_splicer.h
index 22cce47..50445b2 100644
--- a/media/base/audio_splicer.h
+++ b/media/base/audio_splicer.h
@@ -13,13 +13,13 @@
namespace media {
+class AudioBuffer;
class AudioDecoderConfig;
-class DataBuffer;
// Helper class that handles filling gaps and resolving overlaps.
class MEDIA_EXPORT AudioSplicer {
public:
- AudioSplicer(int bytes_per_frame, int samples_per_second);
+ AudioSplicer(int samples_per_second);
~AudioSplicer();
// Resets the splicer state by clearing the output buffers queue,
@@ -29,27 +29,27 @@ class MEDIA_EXPORT AudioSplicer {
// Adds a new buffer full of samples or end of stream buffer to the splicer.
// Returns true if the buffer was accepted. False is returned if an error
// occurred.
- bool AddInput(const scoped_refptr<DataBuffer>& input);
+ bool AddInput(const scoped_refptr<AudioBuffer>& input);
// Returns true if the splicer has a buffer to return.
bool HasNextBuffer() const;
// Removes the next buffer from the output buffer queue and returns it.
// This should only be called if HasNextBuffer() returns true.
- scoped_refptr<DataBuffer> GetNextBuffer();
+ scoped_refptr<AudioBuffer> GetNextBuffer();
private:
- void AddOutputBuffer(const scoped_refptr<DataBuffer>& buffer);
+ void AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer);
AudioTimestampHelper output_timestamp_helper_;
// Minimum gap size needed before the splicer will take action to
// fill a gap. This avoids periodically inserting and then dropping samples
// when the buffer timestamps are slightly off because of timestamp rounding
- // in the source content.
+ // in the source content. Unit is frames.
int min_gap_size_;
- std::deque<scoped_refptr<DataBuffer> > output_buffers_;
+ std::deque<scoped_refptr<AudioBuffer> > output_buffers_;
bool received_end_of_stream_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AudioSplicer);
diff --git a/media/base/audio_splicer_unittest.cc b/media/base/audio_splicer_unittest.cc
index 2929007..4390268 100644
--- a/media/base/audio_splicer_unittest.cc
+++ b/media/base/audio_splicer_unittest.cc
@@ -3,44 +3,52 @@
// found in the LICENSE file.
#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
#include "media/base/audio_splicer.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/buffers.h"
-#include "media/base/data_buffer.h"
+#include "media/base/test_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
-static const int kBytesPerFrame = 4;
+static const SampleFormat kSampleFormat = kSampleFormatF32;
+static const int kChannels = 1;
static const int kDefaultSampleRate = 44100;
-static const int kDefaultBufferSize = 100 * kBytesPerFrame;
+static const int kDefaultBufferSize = 100;
class AudioSplicerTest : public ::testing::Test {
public:
AudioSplicerTest()
- : splicer_(kBytesPerFrame, kDefaultSampleRate),
- input_timestamp_helper_(kBytesPerFrame, kDefaultSampleRate) {
+ : splicer_(kDefaultSampleRate),
+ input_timestamp_helper_(kDefaultSampleRate) {
input_timestamp_helper_.SetBaseTimestamp(base::TimeDelta());
}
- scoped_refptr<DataBuffer> GetNextInputBuffer(uint8 value) {
+ scoped_refptr<AudioBuffer> GetNextInputBuffer(float value) {
return GetNextInputBuffer(value, kDefaultBufferSize);
}
- scoped_refptr<DataBuffer> GetNextInputBuffer(uint8 value, int size) {
- scoped_refptr<DataBuffer> buffer = new DataBuffer(size);
- buffer->set_data_size(size);
- memset(buffer->writable_data(), value, buffer->data_size());
- buffer->set_timestamp(input_timestamp_helper_.GetTimestamp());
- buffer->set_duration(
- input_timestamp_helper_.GetDuration(buffer->data_size()));
- input_timestamp_helper_.AddBytes(buffer->data_size());
+ scoped_refptr<AudioBuffer> GetNextInputBuffer(float value, int frame_size) {
+ scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<float>(
+ kSampleFormat,
+ kChannels,
+ value,
+ 0.0f,
+ frame_size,
+ input_timestamp_helper_.GetTimestamp(),
+ input_timestamp_helper_.GetFrameDuration(frame_size));
+ input_timestamp_helper_.AddFrames(frame_size);
return buffer;
}
- bool VerifyData(const uint8* data, int size, int value) {
- for (int i = 0; i < size; ++i) {
- if (data[i] != value)
+ bool VerifyData(scoped_refptr<AudioBuffer> buffer, float value) {
+ int frames = buffer->frame_count();
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, frames);
+ buffer->ReadFrames(frames, 0, 0, bus.get());
+ for (int i = 0; i < frames; ++i) {
+ if (bus->channel(0)[i] != value)
return false;
}
return true;
@@ -57,38 +65,39 @@ TEST_F(AudioSplicerTest, PassThru) {
EXPECT_FALSE(splicer_.HasNextBuffer());
// Test single buffer pass-thru behavior.
- scoped_refptr<DataBuffer> input_1 = GetNextInputBuffer(1);
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
EXPECT_TRUE(splicer_.AddInput(input_1));
EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<DataBuffer> output_1 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->data_size(), output_1->data_size());
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+ EXPECT_TRUE(VerifyData(output_1, 0.1f));
// Test that multiple buffers can be queued in the splicer.
- scoped_refptr<DataBuffer> input_2 = GetNextInputBuffer(2);
- scoped_refptr<DataBuffer> input_3 = GetNextInputBuffer(3);
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
+ scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
EXPECT_TRUE(splicer_.AddInput(input_2));
EXPECT_TRUE(splicer_.AddInput(input_3));
EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<DataBuffer> output_2 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
EXPECT_TRUE(splicer_.HasNextBuffer());
EXPECT_EQ(input_2->timestamp(), output_2->timestamp());
EXPECT_EQ(input_2->duration(), output_2->duration());
- EXPECT_EQ(input_2->data_size(), output_2->data_size());
+ EXPECT_EQ(input_2->frame_count(), output_2->frame_count());
- scoped_refptr<DataBuffer> output_3 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
EXPECT_EQ(input_3->timestamp(), output_3->timestamp());
EXPECT_EQ(input_3->duration(), output_3->duration());
- EXPECT_EQ(input_3->data_size(), output_3->data_size());
+ EXPECT_EQ(input_3->frame_count(), output_3->frame_count());
}
TEST_F(AudioSplicerTest, Reset) {
- scoped_refptr<DataBuffer> input_1 = GetNextInputBuffer(1);
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
EXPECT_TRUE(splicer_.AddInput(input_1));
EXPECT_TRUE(splicer_.HasNextBuffer());
@@ -99,47 +108,47 @@ TEST_F(AudioSplicerTest, Reset) {
// next buffer starts many frames beyond the end of
// |input_1|. This is to make sure that Reset() actually
// clears its state and doesn't try to insert a gap.
- input_timestamp_helper_.AddBytes(100 * kBytesPerFrame);
+ input_timestamp_helper_.AddFrames(100);
// Verify that a new input buffer passes through as expected.
- scoped_refptr<DataBuffer> input_2 = GetNextInputBuffer(2);
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
EXPECT_TRUE(splicer_.AddInput(input_2));
EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<DataBuffer> output_2 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
EXPECT_EQ(input_2->timestamp(), output_2->timestamp());
EXPECT_EQ(input_2->duration(), output_2->duration());
- EXPECT_EQ(input_2->data_size(), output_2->data_size());
+ EXPECT_EQ(input_2->frame_count(), output_2->frame_count());
}
TEST_F(AudioSplicerTest, EndOfStream) {
- scoped_refptr<DataBuffer> input_1 = GetNextInputBuffer(1);
- scoped_refptr<DataBuffer> input_2 = DataBuffer::CreateEOSBuffer();
- scoped_refptr<DataBuffer> input_3 = GetNextInputBuffer(2);
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
+ scoped_refptr<AudioBuffer> input_2 = AudioBuffer::CreateEOSBuffer();
+ scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.2f);
EXPECT_TRUE(input_2->end_of_stream());
EXPECT_TRUE(splicer_.AddInput(input_1));
EXPECT_TRUE(splicer_.AddInput(input_2));
EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<DataBuffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<DataBuffer> output_2 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->data_size(), output_1->data_size());
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
EXPECT_TRUE(output_2->end_of_stream());
// Verify that buffers can be added again after Reset().
splicer_.Reset();
EXPECT_TRUE(splicer_.AddInput(input_3));
- scoped_refptr<DataBuffer> output_3 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
EXPECT_EQ(input_3->timestamp(), output_3->timestamp());
EXPECT_EQ(input_3->duration(), output_3->duration());
- EXPECT_EQ(input_3->data_size(), output_3->data_size());
+ EXPECT_EQ(input_3->frame_count(), output_3->frame_count());
}
@@ -152,30 +161,30 @@ TEST_F(AudioSplicerTest, EndOfStream) {
// |11111111111111|0000|22222222222222|
// +--------------+----+--------------+
TEST_F(AudioSplicerTest, GapInsertion) {
- scoped_refptr<DataBuffer> input_1 = GetNextInputBuffer(1);
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
-  // Add bytes to the timestamp helper so that the next buffer
+  // Add frames to the timestamp helper so that the next buffer
// will have a starting timestamp that indicates a gap is
// present.
- const int kGapSize = 7 * kBytesPerFrame;
- input_timestamp_helper_.AddBytes(kGapSize);
- scoped_refptr<DataBuffer> input_2 = GetNextInputBuffer(2);
+ const int kGapSize = 7;
+ input_timestamp_helper_.AddFrames(kGapSize);
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
EXPECT_TRUE(splicer_.AddInput(input_1));
EXPECT_TRUE(splicer_.AddInput(input_2));
// Verify that a gap buffer is generated.
EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<DataBuffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<DataBuffer> output_2 = splicer_.GetNextBuffer();
- scoped_refptr<DataBuffer> output_3 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_3 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
// Verify that the first input buffer passed through unmodified.
EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->data_size(), output_1->data_size());
- EXPECT_TRUE(VerifyData(output_1->data(), output_1->data_size(), 1));
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+ EXPECT_TRUE(VerifyData(output_1, 0.1f));
// Verify the contents of the gap buffer.
base::TimeDelta gap_timestamp =
@@ -184,39 +193,39 @@ TEST_F(AudioSplicerTest, GapInsertion) {
EXPECT_GT(gap_duration, base::TimeDelta());
EXPECT_EQ(gap_timestamp, output_2->timestamp());
EXPECT_EQ(gap_duration, output_2->duration());
- EXPECT_EQ(kGapSize, output_2->data_size());
- EXPECT_TRUE(VerifyData(output_2->data(), output_2->data_size(), 0));
+ EXPECT_EQ(kGapSize, output_2->frame_count());
+ EXPECT_TRUE(VerifyData(output_2, 0.0f));
// Verify that the second input buffer passed through unmodified.
EXPECT_EQ(input_2->timestamp(), output_3->timestamp());
EXPECT_EQ(input_2->duration(), output_3->duration());
- EXPECT_EQ(input_2->data_size(), output_3->data_size());
- EXPECT_TRUE(VerifyData(output_3->data(), output_3->data_size(), 2));
+ EXPECT_EQ(input_2->frame_count(), output_3->frame_count());
+ EXPECT_TRUE(VerifyData(output_3, 0.2f));
}
// Test that an error is signalled when the gap between input buffers is
// too large.
TEST_F(AudioSplicerTest, GapTooLarge) {
- scoped_refptr<DataBuffer> input_1 = GetNextInputBuffer(1);
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
-  // Add a seconds worth of bytes so that an unacceptably large
+  // Add a second's worth of frames so that an unacceptably large
// gap exists between |input_1| and |input_2|.
- const int kGapSize = kDefaultSampleRate * kBytesPerFrame;
- input_timestamp_helper_.AddBytes(kGapSize);
- scoped_refptr<DataBuffer> input_2 = GetNextInputBuffer(2);
+ const int kGapSize = kDefaultSampleRate;
+ input_timestamp_helper_.AddFrames(kGapSize);
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
EXPECT_TRUE(splicer_.AddInput(input_1));
EXPECT_FALSE(splicer_.AddInput(input_2));
EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<DataBuffer> output_1 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
// Verify that the first input buffer passed through unmodified.
EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->data_size(), output_1->data_size());
- EXPECT_TRUE(VerifyData(output_1->data(), output_1->data_size(), 1));
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+ EXPECT_TRUE(VerifyData(output_1, 0.1f));
// Verify that the second buffer is not available.
EXPECT_FALSE(splicer_.HasNextBuffer());
@@ -227,15 +236,15 @@ TEST_F(AudioSplicerTest, GapTooLarge) {
input_1->timestamp() + input_1->duration());
// Verify that valid buffers are still accepted.
- scoped_refptr<DataBuffer> input_3 = GetNextInputBuffer(3);
+ scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
EXPECT_TRUE(splicer_.AddInput(input_3));
EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<DataBuffer> output_2 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
EXPECT_EQ(input_3->timestamp(), output_2->timestamp());
EXPECT_EQ(input_3->duration(), output_2->duration());
- EXPECT_EQ(input_3->data_size(), output_2->data_size());
- EXPECT_TRUE(VerifyData(output_2->data(), output_2->data_size(), 3));
+ EXPECT_EQ(input_3->frame_count(), output_2->frame_count());
+ EXPECT_TRUE(VerifyData(output_2, 0.3f));
}
@@ -244,12 +253,12 @@ TEST_F(AudioSplicerTest, GapTooLarge) {
TEST_F(AudioSplicerTest, BufferAddedBeforeBase) {
input_timestamp_helper_.SetBaseTimestamp(
base::TimeDelta::FromMicroseconds(10));
- scoped_refptr<DataBuffer> input_1 = GetNextInputBuffer(1);
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
// Reset the timestamp helper so the next buffer will have a timestamp earlier
// than |input_1|.
input_timestamp_helper_.SetBaseTimestamp(base::TimeDelta::FromSeconds(0));
- scoped_refptr<DataBuffer> input_2 = GetNextInputBuffer(1);
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.1f);
EXPECT_GT(input_1->timestamp(), input_2->timestamp());
EXPECT_TRUE(splicer_.AddInput(input_1));
@@ -269,41 +278,40 @@ TEST_F(AudioSplicerTest, BufferAddedBeforeBase) {
// |11111111111111|2222222222|
// +--------------+----------+
TEST_F(AudioSplicerTest, PartialOverlap) {
- scoped_refptr<DataBuffer> input_1 = GetNextInputBuffer(1);
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
// Reset timestamp helper so that the next buffer will have a
// timestamp that starts in the middle of |input_1|.
- const int kOverlapSize = input_1->data_size() / 4;
+ const int kOverlapSize = input_1->frame_count() / 4;
input_timestamp_helper_.SetBaseTimestamp(input_1->timestamp());
- input_timestamp_helper_.AddBytes(input_1->data_size() - kOverlapSize);
+ input_timestamp_helper_.AddFrames(input_1->frame_count() - kOverlapSize);
- scoped_refptr<DataBuffer> input_2 = GetNextInputBuffer(2);
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f);
EXPECT_TRUE(splicer_.AddInput(input_1));
EXPECT_TRUE(splicer_.AddInput(input_2));
EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<DataBuffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<DataBuffer> output_2 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
// Verify that the first input buffer passed through unmodified.
EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->data_size(), output_1->data_size());
- EXPECT_TRUE(VerifyData(output_1->data(), output_1->data_size(), 1));
-
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+ EXPECT_TRUE(VerifyData(output_1, 0.1f));
// Verify that the second input buffer was truncated to only contain
- // the samples that are after the end of |input_1|.
+  // the samples that are after the end of |input_1|. Note that the data is
+  // not copied; |input_2| is trimmed in place, so its values are modified.
base::TimeDelta expected_timestamp =
input_1->timestamp() + input_1->duration();
base::TimeDelta expected_duration =
(input_2->timestamp() + input_2->duration()) - expected_timestamp;
EXPECT_EQ(expected_timestamp, output_2->timestamp());
EXPECT_EQ(expected_duration, output_2->duration());
- EXPECT_EQ(input_2->data_size() - kOverlapSize, output_2->data_size());
- EXPECT_TRUE(VerifyData(output_2->data(), output_2->data_size(), 2));
+ EXPECT_TRUE(VerifyData(output_2, 0.2f));
}
@@ -323,44 +331,44 @@ TEST_F(AudioSplicerTest, PartialOverlap) {
// |11111111111111|3333333333333|
// +--------------+-------------+
TEST_F(AudioSplicerTest, DropBuffer) {
- scoped_refptr<DataBuffer> input_1 = GetNextInputBuffer(1);
+ scoped_refptr<AudioBuffer> input_1 = GetNextInputBuffer(0.1f);
// Reset timestamp helper so that the next buffer will have a
// timestamp that starts in the middle of |input_1|.
- const int kOverlapOffset = input_1->data_size() / 2;
- const int kOverlapSize = input_1->data_size() / 4;
+ const int kOverlapOffset = input_1->frame_count() / 2;
+ const int kOverlapSize = input_1->frame_count() / 4;
input_timestamp_helper_.SetBaseTimestamp(input_1->timestamp());
- input_timestamp_helper_.AddBytes(kOverlapOffset);
+ input_timestamp_helper_.AddFrames(kOverlapOffset);
- scoped_refptr<DataBuffer> input_2 = GetNextInputBuffer(2, kOverlapSize);
+ scoped_refptr<AudioBuffer> input_2 = GetNextInputBuffer(0.2f, kOverlapSize);
// Reset the timestamp helper so the next buffer will be right after
// |input_1|.
input_timestamp_helper_.SetBaseTimestamp(input_1->timestamp());
- input_timestamp_helper_.AddBytes(input_1->data_size());
- scoped_refptr<DataBuffer> input_3 = GetNextInputBuffer(3);
+ input_timestamp_helper_.AddFrames(input_1->frame_count());
+ scoped_refptr<AudioBuffer> input_3 = GetNextInputBuffer(0.3f);
EXPECT_TRUE(splicer_.AddInput(input_1));
EXPECT_TRUE(splicer_.AddInput(input_2));
EXPECT_TRUE(splicer_.AddInput(input_3));
EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<DataBuffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<DataBuffer> output_2 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_1 = splicer_.GetNextBuffer();
+ scoped_refptr<AudioBuffer> output_2 = splicer_.GetNextBuffer();
EXPECT_FALSE(splicer_.HasNextBuffer());
// Verify that the first input buffer passed through unmodified.
EXPECT_EQ(input_1->timestamp(), output_1->timestamp());
EXPECT_EQ(input_1->duration(), output_1->duration());
- EXPECT_EQ(input_1->data_size(), output_1->data_size());
- EXPECT_TRUE(VerifyData(output_1->data(), output_1->data_size(), 1));
+ EXPECT_EQ(input_1->frame_count(), output_1->frame_count());
+ EXPECT_TRUE(VerifyData(output_1, 0.1f));
// Verify that the second output buffer only contains
// the samples that are in |input_3|.
EXPECT_EQ(input_3->timestamp(), output_2->timestamp());
EXPECT_EQ(input_3->duration(), output_2->duration());
- EXPECT_EQ(input_3->data_size(), output_2->data_size());
- EXPECT_TRUE(VerifyData(output_2->data(), output_2->data_size(), 3));
+ EXPECT_EQ(input_3->frame_count(), output_2->frame_count());
+ EXPECT_TRUE(VerifyData(output_2, 0.3f));
}
} // namespace media
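For reference, the splicer contract these tests exercise reduces to the
following caller-side sketch. It is illustrative only: the single-argument
AudioSplicer constructor taking the sample rate is assumed, and all values
are arbitrary.

  AudioSplicer splicer(44100);  // assumed ctor; sample rate is illustrative
  scoped_refptr<AudioBuffer> in = MakeInterleavedAudioBuffer<float>(
      kSampleFormatF32, 2, 0.5f, 0.0f, 441,
      base::TimeDelta(), base::TimeDelta::FromMilliseconds(10));
  if (!splicer.AddInput(in)) {
    // The gap from the previous buffer was too large; treat as an error.
  }
  while (splicer.HasNextBuffer()) {
    scoped_refptr<AudioBuffer> out = splicer.GetNextBuffer();
    // |out| is a pass-through, zero-filled gap, or truncated overlap buffer,
    // matching the cases tested above.
  }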
diff --git a/media/base/audio_timestamp_helper.cc b/media/base/audio_timestamp_helper.cc
index a3f37c4..38fde1f 100644
--- a/media/base/audio_timestamp_helper.cc
+++ b/media/base/audio_timestamp_helper.cc
@@ -9,12 +9,9 @@
namespace media {
-AudioTimestampHelper::AudioTimestampHelper(int bytes_per_frame,
- int samples_per_second)
- : bytes_per_frame_(bytes_per_frame),
- base_timestamp_(kNoTimestamp()),
+AudioTimestampHelper::AudioTimestampHelper(int samples_per_second)
+ : base_timestamp_(kNoTimestamp()),
frame_count_(0) {
- DCHECK_GT(bytes_per_frame, 0);
DCHECK_GT(samples_per_second, 0);
double fps = samples_per_second;
microseconds_per_frame_ = base::Time::kMicrosecondsPerSecond / fps;
@@ -29,27 +26,23 @@ base::TimeDelta AudioTimestampHelper::base_timestamp() const {
return base_timestamp_;
}
-void AudioTimestampHelper::AddBytes(int byte_count) {
- DCHECK_GE(byte_count, 0);
+void AudioTimestampHelper::AddFrames(int frame_count) {
+ DCHECK_GE(frame_count, 0);
DCHECK(base_timestamp_ != kNoTimestamp());
- DCHECK_EQ(byte_count % bytes_per_frame_, 0);
- frame_count_ += byte_count / bytes_per_frame_;
+ frame_count_ += frame_count;
}
base::TimeDelta AudioTimestampHelper::GetTimestamp() const {
return ComputeTimestamp(frame_count_);
}
-base::TimeDelta AudioTimestampHelper::GetDuration(int byte_count) const {
- DCHECK_GE(byte_count, 0);
- DCHECK_EQ(byte_count % bytes_per_frame_, 0);
- int frames = byte_count / bytes_per_frame_;
- base::TimeDelta end_timestamp = ComputeTimestamp(frame_count_ + frames);
+base::TimeDelta AudioTimestampHelper::GetFrameDuration(int frame_count) const {
+ DCHECK_GE(frame_count, 0);
+ base::TimeDelta end_timestamp = ComputeTimestamp(frame_count_ + frame_count);
return end_timestamp - GetTimestamp();
}
-int64 AudioTimestampHelper::GetBytesToTarget(
- base::TimeDelta target) const {
+int64 AudioTimestampHelper::GetFramesToTarget(base::TimeDelta target) const {
DCHECK(base_timestamp_ != kNoTimestamp());
DCHECK(target >= base_timestamp_);
@@ -68,7 +61,7 @@ int64 AudioTimestampHelper::GetBytesToTarget(
double threshold = microseconds_per_frame_ / 2;
int64 target_frame_count =
(delta_from_base.InMicroseconds() + threshold) / microseconds_per_frame_;
- return bytes_per_frame_ * (target_frame_count - frame_count_);
+ return target_frame_count - frame_count_;
}
base::TimeDelta AudioTimestampHelper::ComputeTimestamp(
diff --git a/media/base/audio_timestamp_helper.h b/media/base/audio_timestamp_helper.h
index db56523..8b5d50e 100644
--- a/media/base/audio_timestamp_helper.h
+++ b/media/base/audio_timestamp_helper.h
@@ -10,60 +10,57 @@
namespace media {
-// Generates timestamps for a sequence of audio sample bytes. This class should
+// Generates timestamps for a sequence of audio sample frames. This class should
// be used any place timestamps need to be calculated for a sequence of audio
-// samples. It helps avoid timestamps inaccuracies caused by rounding/truncation
+// samples. It helps avoid timestamp inaccuracies caused by rounding/truncation
// in repeated sample count to timestamp conversions.
//
-// The class is constructed with bytes per frame and samples_per_second
-// information so that it can convert audio sample byte counts into timestamps.
-// After the object is constructed, SetBaseTimestamp() must be called to specify
-// the starting timestamp of the audio sequence. As audio samples are received,
-// their byte counts are added to AddBytes(). These byte counts are
-// accumulated by this class so GetTimestamp() can be used to determine the
-// timestamp for the samples that have been added. GetDuration() calculates
-// the proper duration values for samples added to the current timestamp.
-// GetBytesToTarget() determines the number of bytes that need to be
-// added/removed from the accumulated bytes to reach a target timestamp.
+// The class is constructed with samples_per_second information so that it can
+// convert audio sample frame counts into timestamps. After the object is
+// constructed, SetBaseTimestamp() must be called to specify the starting
+// timestamp of the audio sequence. As audio samples are received, their frame
+// counts are added using AddFrames(). These frame counts are accumulated by
+// this class so GetTimestamp() can be used to determine the timestamp for the
+// samples that have been added. GetFrameDuration() calculates the proper
+// duration values for samples added to the current timestamp.
+// GetFramesToTarget() determines the number of frames that need to be
+// added/removed from the accumulated frames to reach a target timestamp.
class MEDIA_EXPORT AudioTimestampHelper {
public:
- AudioTimestampHelper(int bytes_per_frame, int samples_per_second);
+  explicit AudioTimestampHelper(int samples_per_second);
-  // Sets the base timestamp to |base_timestamp| and the sets count to 0.
+  // Sets the base timestamp to |base_timestamp| and sets the frame count to 0.
void SetBaseTimestamp(base::TimeDelta base_timestamp);
base::TimeDelta base_timestamp() const;
- // Adds sample bytes to the frame counter.
- //
+ // Adds |frame_count| to the frame counter.
// Note: SetBaseTimestamp() must be called with a value other than
// kNoTimestamp() before this method can be called.
- void AddBytes(int byte_count);
+ void AddFrames(int frame_count);
// Get the current timestamp. This value is computed from the base_timestamp()
- // and the number of sample bytes that have been added so far.
+ // and the number of sample frames that have been added so far.
base::TimeDelta GetTimestamp() const;
- // Gets the duration if |byte_count| bytes were added to the current
+ // Gets the duration if |frame_count| frames were added to the current
// timestamp reported by GetTimestamp(). This method ensures that
- // (GetTimestamp() + GetDuration(n)) will equal the timestamp that
- // GetTimestamp() will return if AddBytes(n) is called.
- base::TimeDelta GetDuration(int byte_count) const;
+ // (GetTimestamp() + GetFrameDuration(n)) will equal the timestamp that
+ // GetTimestamp() will return if AddFrames(n) is called.
+ base::TimeDelta GetFrameDuration(int frame_count) const;
- // Returns the number of bytes needed to reach the target timestamp.
- //
+ // Returns the number of frames needed to reach the target timestamp.
// Note: |target| must be >= |base_timestamp_|.
- int64 GetBytesToTarget(base::TimeDelta target) const;
+ int64 GetFramesToTarget(base::TimeDelta target) const;
private:
base::TimeDelta ComputeTimestamp(int64 frame_count) const;
- int bytes_per_frame_;
double microseconds_per_frame_;
base::TimeDelta base_timestamp_;
- // Number of frames accumulated by byte counts passed to AddBytes() calls.
+ // Number of frames accumulated by AddFrames() calls.
int64 frame_count_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AudioTimestampHelper);
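As a usage sketch of the frame-based API declared above (the 44100 sample
rate and 1024-frame packet are illustrative values, not from this patch):

  AudioTimestampHelper helper(44100);
  helper.SetBaseTimestamp(base::TimeDelta());
  base::TimeDelta pts = helper.GetTimestamp();          // 0 us
  base::TimeDelta dur = helper.GetFrameDuration(1024);  // 1024 frames' worth
  helper.AddFrames(1024);
  // GetTimestamp() now returns exactly pts + dur. Because whole frame counts
  // are accumulated instead of rounded timestamps, no drift builds up over
  // repeated calls.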
diff --git a/media/base/audio_timestamp_helper_unittest.cc b/media/base/audio_timestamp_helper_unittest.cc
index 5f5bb4e..a0cfa3b 100644
--- a/media/base/audio_timestamp_helper_unittest.cc
+++ b/media/base/audio_timestamp_helper_unittest.cc
@@ -8,31 +8,31 @@
namespace media {
-static const int kBytesPerFrame = 4;
static const int kDefaultSampleRate = 44100;
class AudioTimestampHelperTest : public ::testing::Test {
public:
- AudioTimestampHelperTest()
- : helper_(kBytesPerFrame, kDefaultSampleRate) {
+ AudioTimestampHelperTest() : helper_(kDefaultSampleRate) {
helper_.SetBaseTimestamp(base::TimeDelta());
}
- // Adds bytes to the helper and returns the current timestamp in microseconds.
- int64 AddBytes(int bytes) {
- helper_.AddBytes(bytes);
+ // Adds frames to the helper and returns the current timestamp in
+ // microseconds.
+ int64 AddFrames(int frames) {
+ helper_.AddFrames(frames);
return helper_.GetTimestamp().InMicroseconds();
}
- int64 BytesToTarget(int target_in_microseconds) {
- return helper_.GetBytesToTarget(
+ int64 FramesToTarget(int target_in_microseconds) {
+ return helper_.GetFramesToTarget(
base::TimeDelta::FromMicroseconds(target_in_microseconds));
}
- void TestGetBytesToTargetRange(int byte_count, int start, int end) {
- for (int i = start; i <= end; ++i)
- EXPECT_EQ(byte_count,BytesToTarget(i)) << " Failure for timestamp "
- << i << " us.";
+ void TestGetFramesToTargetRange(int frame_count, int start, int end) {
+ for (int i = start; i <= end; ++i) {
+ EXPECT_EQ(frame_count, FramesToTarget(i)) << " Failure for timestamp "
+ << i << " us.";
+ }
}
protected:
@@ -48,22 +48,22 @@ TEST_F(AudioTimestampHelperTest, Basic) {
// nearest microsecond. 1 frame @ 44100 is ~22.67573 microseconds,
// which is why the timestamp sometimes increments by 23 microseconds
// and other times it increments by 22 microseconds.
- EXPECT_EQ(0, AddBytes(0));
- EXPECT_EQ(22, AddBytes(kBytesPerFrame));
- EXPECT_EQ(45, AddBytes(kBytesPerFrame));
- EXPECT_EQ(68, AddBytes(kBytesPerFrame));
- EXPECT_EQ(90, AddBytes(kBytesPerFrame));
- EXPECT_EQ(113, AddBytes(kBytesPerFrame));
-
- // Verify that adding bytes one frame at a time matches the timestamp returned
- // if the same number of bytes are added all at once.
+ EXPECT_EQ(0, AddFrames(0));
+ EXPECT_EQ(22, AddFrames(1));
+ EXPECT_EQ(45, AddFrames(1));
+ EXPECT_EQ(68, AddFrames(1));
+ EXPECT_EQ(90, AddFrames(1));
+ EXPECT_EQ(113, AddFrames(1));
+
+ // Verify that adding frames one frame at a time matches the timestamp
+ // returned if the same number of frames are added all at once.
base::TimeDelta timestamp_1 = helper_.GetTimestamp();
helper_.SetBaseTimestamp(kNoTimestamp());
EXPECT_TRUE(kNoTimestamp() == helper_.base_timestamp());
helper_.SetBaseTimestamp(base::TimeDelta());
EXPECT_EQ(0, helper_.GetTimestamp().InMicroseconds());
- helper_.AddBytes(5 * kBytesPerFrame);
+ helper_.AddFrames(5);
EXPECT_EQ(113, helper_.GetTimestamp().InMicroseconds());
EXPECT_TRUE(timestamp_1 == helper_.GetTimestamp());
}
@@ -72,53 +72,51 @@ TEST_F(AudioTimestampHelperTest, Basic) {
TEST_F(AudioTimestampHelperTest, GetDuration) {
helper_.SetBaseTimestamp(base::TimeDelta::FromMicroseconds(100));
- int byte_count = 5 * kBytesPerFrame;
+ int frame_count = 5;
int64 expected_durations[] = { 113, 113, 114, 113, 113, 114 };
for (size_t i = 0; i < arraysize(expected_durations); ++i) {
- base::TimeDelta duration = helper_.GetDuration(byte_count);
+ base::TimeDelta duration = helper_.GetFrameDuration(frame_count);
EXPECT_EQ(expected_durations[i], duration.InMicroseconds());
base::TimeDelta timestamp_1 = helper_.GetTimestamp() + duration;
- helper_.AddBytes(byte_count);
+ helper_.AddFrames(frame_count);
base::TimeDelta timestamp_2 = helper_.GetTimestamp();
EXPECT_TRUE(timestamp_1 == timestamp_2);
}
}
-TEST_F(AudioTimestampHelperTest, GetBytesToTarget) {
- // Verify GetBytesToTarget() rounding behavior.
+TEST_F(AudioTimestampHelperTest, GetFramesToTarget) {
+ // Verify GetFramesToTarget() rounding behavior.
// 1 frame @ 44100 is ~22.67573 microseconds,
// Test values less than half of the frame duration.
- TestGetBytesToTargetRange(0, 0, 11);
+ TestGetFramesToTargetRange(0, 0, 11);
// Test values between half the frame duration & the
// full frame duration.
- TestGetBytesToTargetRange(kBytesPerFrame, 12, 22);
+ TestGetFramesToTargetRange(1, 12, 22);
- // Verify that the same number of bytes is returned up
+ // Verify that the same number of frames is returned up
// to the next half a frame.
- TestGetBytesToTargetRange(kBytesPerFrame, 23, 34);
+ TestGetFramesToTargetRange(1, 23, 34);
// Verify the next 3 ranges.
- TestGetBytesToTargetRange(2 * kBytesPerFrame, 35, 56);
- TestGetBytesToTargetRange(3 * kBytesPerFrame, 57, 79);
- TestGetBytesToTargetRange(4 * kBytesPerFrame, 80, 102);
- TestGetBytesToTargetRange(5 * kBytesPerFrame, 103, 124);
+ TestGetFramesToTargetRange(2, 35, 56);
+ TestGetFramesToTargetRange(3, 57, 79);
+ TestGetFramesToTargetRange(4, 80, 102);
+ TestGetFramesToTargetRange(5, 103, 124);
-
- // Add bytes to the helper so negative byte counts can
- // be tested.
- helper_.AddBytes(5 * kBytesPerFrame);
+ // Add frames to the helper so negative frame counts can be tested.
+ helper_.AddFrames(5);
// Note: The timestamp ranges must match the positive values
// tested above to verify that the code is rounding properly.
- TestGetBytesToTargetRange(0 * kBytesPerFrame, 103, 124);
- TestGetBytesToTargetRange(-1 * kBytesPerFrame, 80, 102);
- TestGetBytesToTargetRange(-2 * kBytesPerFrame, 57, 79);
- TestGetBytesToTargetRange(-3 * kBytesPerFrame, 35, 56);
- TestGetBytesToTargetRange(-4 * kBytesPerFrame, 12, 34);
- TestGetBytesToTargetRange(-5 * kBytesPerFrame, 0, 11);
+ TestGetFramesToTargetRange(0, 103, 124);
+ TestGetFramesToTargetRange(-1, 80, 102);
+ TestGetFramesToTargetRange(-2, 57, 79);
+ TestGetFramesToTargetRange(-3, 35, 56);
+ TestGetFramesToTargetRange(-4, 12, 34);
+ TestGetFramesToTargetRange(-5, 0, 11);
}
} // namespace media
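Worked through, the ranges above follow directly from the frame duration: at
44100 Hz one frame lasts 1000000 / 44100 ~= 22.676 us, and GetFramesToTarget()
rounds with a half-frame (~11.34 us) threshold. Targets in [0, 11] us
therefore map to 0 frames, [12, 34] us to 1 frame, [35, 56] us to 2 frames,
and so on; after AddFrames(5) the same ranges yield 0 down to -5 frames
relative to the 5 accumulated frames.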
diff --git a/media/base/decryptor.h b/media/base/decryptor.h
index 3cee32dc..2c2cf79 100644
--- a/media/base/decryptor.h
+++ b/media/base/decryptor.h
@@ -10,12 +10,12 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/memory/ref_counted.h"
+#include "media/base/audio_buffer.h"
#include "media/base/media_export.h"
namespace media {
class AudioDecoderConfig;
-class DataBuffer;
class DecoderBuffer;
class VideoDecoderConfig;
class VideoFrame;
@@ -103,7 +103,7 @@ class MEDIA_EXPORT Decryptor {
// Helper structure for managing multiple decoded audio buffers per input.
// TODO(xhwang): Rename this to AudioFrames.
- typedef std::list<scoped_refptr<DataBuffer> > AudioBuffers;
+ typedef std::list<scoped_refptr<AudioBuffer> > AudioBuffers;
// Indicates completion of audio/video decrypt-and-decode operation.
//
diff --git a/media/base/test_helpers.cc b/media/base/test_helpers.cc
index 54850f9..5f7604b 100644
--- a/media/base/test_helpers.cc
+++ b/media/base/test_helpers.cc
@@ -153,7 +153,8 @@ scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
T start,
T increment,
int frames,
- base::TimeDelta start_time) {
+ base::TimeDelta start_time,
+ base::TimeDelta duration) {
DCHECK(format == kSampleFormatU8 || format == kSampleFormatS16 ||
format == kSampleFormatS32 || format == kSampleFormatF32);
@@ -173,8 +174,6 @@ scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
buffer[i] = start;
start += increment;
}
- // Duration is 1 second per frame (for simplicity).
- base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
return AudioBuffer::CopyFrom(
format, channels, frames, data, start_time, duration);
}
@@ -186,7 +185,8 @@ scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
T start,
T increment,
int frames,
- base::TimeDelta start_time) {
+ base::TimeDelta start_time,
+ base::TimeDelta duration) {
DCHECK(format == kSampleFormatPlanarF32 || format == kSampleFormatPlanarS16);
// Create multiple blocks of data, one for each channel.
@@ -209,8 +209,6 @@ scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
start += increment;
}
}
- // Duration is 1 second per frame (for simplicity).
- base::TimeDelta duration = base::TimeDelta::FromSeconds(frames);
return AudioBuffer::CopyFrom(
format, channels, frames, data.get(), start_time, duration);
}
@@ -225,7 +223,8 @@ scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
type start, \
type increment, \
int frames, \
- base::TimeDelta start_time)
+ base::TimeDelta start_time, \
+ base::TimeDelta duration)
DEFINE_INTERLEAVED_INSTANCE(uint8);
DEFINE_INTERLEAVED_INSTANCE(int16);
DEFINE_INTERLEAVED_INSTANCE(int32);
@@ -238,7 +237,8 @@ DEFINE_INTERLEAVED_INSTANCE(float);
type start, \
type increment, \
int frames, \
- base::TimeDelta start_time);
+ base::TimeDelta start_time, \
+ base::TimeDelta duration);
DEFINE_PLANAR_INSTANCE(int16);
DEFINE_PLANAR_INSTANCE(float);
diff --git a/media/base/test_helpers.h b/media/base/test_helpers.h
index 7eebfd9..a7eb8f5 100644
--- a/media/base/test_helpers.h
+++ b/media/base/test_helpers.h
@@ -94,8 +94,8 @@ class TestVideoConfig {
// requires data to be of type T, but it is verified that |format| is an
// interleaved format.
//
-// |start_time| will be used as the start time for the samples. Duration is set
-// to 1 second per frame, to simplify calculations.
+// |start_time| and |duration| will be used as the start time and duration of
+// the resulting buffer.
template <class T>
scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
SampleFormat format,
@@ -103,7 +103,8 @@ scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
T start,
T increment,
int frames,
- base::TimeDelta start_time);
+ base::TimeDelta start_time,
+ base::TimeDelta duration);
// Create an AudioBuffer containing |frames| frames of data, where each sample
// is of type T. Since this is planar data, there will be a block for each of
@@ -117,8 +118,8 @@ scoped_refptr<AudioBuffer> MakeInterleavedAudioBuffer(
// similar. No check is done that |format| requires data to be of type T, but it
// is verified that |format| is a planar format.
//
-// |start_time| will be used as the start time for the samples. Duration is set
-// to 1 second per frame, to simplify calculations.
+// |start_time| and |duration| will be used as the start time and duration of
+// the resulting buffer.
template <class T>
scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
SampleFormat format,
@@ -126,7 +127,8 @@ scoped_refptr<AudioBuffer> MakePlanarAudioBuffer(
T start,
T increment,
int frames,
- base::TimeDelta start_time);
+ base::TimeDelta start_time,
+ base::TimeDelta duration);
} // namespace media
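A hypothetical call under the new signature (all values illustrative); the
duration now comes from the caller, typically computed with
AudioTimestampHelper::GetFrameDuration(), rather than the old
1-second-per-frame default:

  scoped_refptr<AudioBuffer> buffer = MakeInterleavedAudioBuffer<int16>(
      kSampleFormatS16,
      2,      // channels
      0,      // first sample value
      1,      // per-sample increment
      512,    // frames
      base::TimeDelta(),                          // start_time
      base::TimeDelta::FromMicroseconds(11610));  // ~512 frames at 44100 Hz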
diff --git a/media/filters/audio_renderer_algorithm.cc b/media/filters/audio_renderer_algorithm.cc
index c6b7808..97f0811 100644
--- a/media/filters/audio_renderer_algorithm.cc
+++ b/media/filters/audio_renderer_algorithm.cc
@@ -10,18 +10,19 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "media/audio/audio_util.h"
-#include "media/base/data_buffer.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
namespace media {
-// The starting size in bytes for |audio_buffer_|.
-// Previous usage maintained a deque of 16 DataBuffers, each of size 4Kb. This
-// worked well, so we maintain this number of bytes (16 * 4096).
-static const int kStartingBufferSizeInBytes = 65536;
+// The starting size in frames for |audio_buffer_|. Previous usage maintained a
+// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we
+// maintain this number of frames.
+static const int kStartingBufferSizeInFrames = 16 * 512;
-// The maximum size in bytes for the |audio_buffer_|. Arbitrarily determined.
+// The maximum size in frames for the |audio_buffer_|. Arbitrarily determined.
-// This number represents 3 seconds of 96kHz/16 bit 7.1 surround sound.
+// This number represents 3 seconds of 96kHz audio.
-static const int kMaxBufferSizeInBytes = 4608000;
+static const int kMaxBufferSizeInFrames = 3 * 96000;
// Duration of audio segments used for crossfading (in seconds).
static const double kWindowDuration = 0.08;
@@ -38,15 +39,14 @@ static const float kMaxPlaybackRate = 4.0f;
AudioRendererAlgorithm::AudioRendererAlgorithm()
: channels_(0),
samples_per_second_(0),
- bytes_per_channel_(0),
- playback_rate_(0.0f),
- audio_buffer_(0, kStartingBufferSizeInBytes),
- bytes_in_crossfade_(0),
- bytes_per_frame_(0),
+ playback_rate_(0),
+ frames_in_crossfade_(0),
index_into_window_(0),
crossfade_frame_number_(0),
muted_(false),
- window_size_(0) {
+ muted_partial_frame_(0),
+ window_size_(0),
+ capacity_(kStartingBufferSizeInFrames) {
}
AudioRendererAlgorithm::~AudioRendererAlgorithm() {}
@@ -57,76 +57,83 @@ void AudioRendererAlgorithm::Initialize(float initial_playback_rate,
channels_ = params.channels();
samples_per_second_ = params.sample_rate();
- bytes_per_channel_ = params.bits_per_sample() / 8;
- bytes_per_frame_ = params.GetBytesPerFrame();
SetPlaybackRate(initial_playback_rate);
- window_size_ =
- samples_per_second_ * bytes_per_channel_ * channels_ * kWindowDuration;
- AlignToFrameBoundary(&window_size_);
-
- bytes_in_crossfade_ =
- samples_per_second_ * bytes_per_channel_ * channels_ * kCrossfadeDuration;
- AlignToFrameBoundary(&bytes_in_crossfade_);
-
- crossfade_buffer_.reset(new uint8[bytes_in_crossfade_]);
+ window_size_ = samples_per_second_ * kWindowDuration;
+ frames_in_crossfade_ = samples_per_second_ * kCrossfadeDuration;
+ crossfade_buffer_ = AudioBus::Create(channels_, frames_in_crossfade_);
}
-int AudioRendererAlgorithm::FillBuffer(uint8* dest, int requested_frames) {
- DCHECK_NE(bytes_per_frame_, 0);
-
- if (playback_rate_ == 0.0f)
+int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) {
+ if (playback_rate_ == 0)
return 0;
- // Optimze the |muted_| case to issue a single memset instead of performing
+ // Optimize the |muted_| case to issue a single clear instead of performing
// the full crossfade and clearing each crossfaded frame.
if (muted_) {
- const int frames_to_render = std::min(static_cast<int>(
- (audio_buffer_.forward_bytes() / bytes_per_frame_) / playback_rate_),
- requested_frames);
- memset(dest, 0, frames_to_render * bytes_per_frame_);
- audio_buffer_.Seek(bytes_per_frame_ * frames_to_render * playback_rate_);
+ int frames_to_render =
+ std::min(static_cast<int>(audio_buffer_.frames() / playback_rate_),
+ requested_frames);
+
+ // Compute accurate number of frames to actually skip in the source data.
+ // Includes the leftover partial frame from last request. However, we can
+ // only skip over complete frames, so a partial frame may remain for next
+ // time.
+ muted_partial_frame_ += frames_to_render * playback_rate_;
+ int seek_frames = static_cast<int>(muted_partial_frame_);
+ dest->ZeroFrames(frames_to_render);
+ audio_buffer_.SeekFrames(seek_frames);
+
+ // Determine the partial frame that remains to be skipped for next call. If
+  // the user switches back to playing, it may be off in time by this partial
+ // frame, which would be undetectable. If they subsequently switch to
+ // another playback rate that mutes, the code will attempt to line up the
+ // frames again.
+ muted_partial_frame_ -= seek_frames;
return frames_to_render;
}
int slower_step = ceil(window_size_ * playback_rate_);
int faster_step = ceil(window_size_ / playback_rate_);
- AlignToFrameBoundary(&slower_step);
- AlignToFrameBoundary(&faster_step);
// Optimize the most common |playback_rate_| ~= 1 case to use a single copy
// instead of copying frame by frame.
if (window_size_ <= faster_step && slower_step >= window_size_) {
- const int frames_to_copy = std::min(
- audio_buffer_.forward_bytes() / bytes_per_frame_, requested_frames);
- const int bytes_to_copy = bytes_per_frame_ * frames_to_copy;
- const int bytes_read = audio_buffer_.Read(dest, bytes_to_copy);
- DCHECK_EQ(bytes_read, bytes_to_copy);
- return frames_to_copy;
+ const int frames_to_copy =
+ std::min(audio_buffer_.frames(), requested_frames);
+ const int frames_read = audio_buffer_.ReadFrames(frames_to_copy, 0, dest);
+ DCHECK_EQ(frames_read, frames_to_copy);
+ return frames_read;
}
int total_frames_rendered = 0;
- uint8* output_ptr = dest;
while (total_frames_rendered < requested_frames) {
- if (index_into_window_ == window_size_)
+ if (index_into_window_ >= window_size_)
ResetWindow();
- bool rendered_frame = true;
+ int rendered_frames = 0;
if (window_size_ > faster_step) {
- rendered_frame = OutputFasterPlayback(
- output_ptr, window_size_, faster_step);
+ rendered_frames =
+ OutputFasterPlayback(dest,
+ total_frames_rendered,
+ requested_frames - total_frames_rendered,
+ window_size_,
+ faster_step);
} else if (slower_step < window_size_) {
- rendered_frame = OutputSlowerPlayback(
- output_ptr, slower_step, window_size_);
+ rendered_frames =
+ OutputSlowerPlayback(dest,
+ total_frames_rendered,
+ requested_frames - total_frames_rendered,
+ slower_step,
+ window_size_);
} else {
NOTREACHED();
}
- if (!rendered_frame)
+ if (rendered_frames == 0)
break;
- output_ptr += bytes_per_frame_;
- total_frames_rendered++;
+ total_frames_rendered += rendered_frames;
}
return total_frames_rendered;
}
@@ -137,17 +144,19 @@ void AudioRendererAlgorithm::ResetWindow() {
crossfade_frame_number_ = 0;
}
-bool AudioRendererAlgorithm::OutputFasterPlayback(uint8* dest,
- int input_step,
- int output_step) {
+int AudioRendererAlgorithm::OutputFasterPlayback(AudioBus* dest,
+ int dest_offset,
+ int requested_frames,
+ int input_step,
+ int output_step) {
// Ensure we don't run into OOB read/write situation.
CHECK_GT(input_step, output_step);
DCHECK_LT(index_into_window_, window_size_);
DCHECK_GT(playback_rate_, 1.0);
DCHECK(!muted_);
- if (audio_buffer_.forward_bytes() < bytes_per_frame_)
- return false;
+ if (audio_buffer_.frames() < 1)
+ return 0;
// The audio data is output in a series of windows. For sped-up playback,
// the window is comprised of the following phases:
@@ -159,11 +168,10 @@ bool AudioRendererAlgorithm::OutputFasterPlayback(uint8* dest,
//
// The duration of each phase is computed below based on the |window_size_|
// and |playback_rate_|.
- int bytes_to_crossfade = bytes_in_crossfade_;
- DCHECK_LE(bytes_to_crossfade, output_step);
+ DCHECK_LE(frames_in_crossfade_, output_step);
// This is the index of the end of phase a, beginning of phase b.
- int outtro_crossfade_begin = output_step - bytes_to_crossfade;
+ int outtro_crossfade_begin = output_step - frames_in_crossfade_;
// This is the index of the end of phase b, beginning of phase c.
int outtro_crossfade_end = output_step;
@@ -171,67 +179,81 @@ bool AudioRendererAlgorithm::OutputFasterPlayback(uint8* dest,
// This is the index of the end of phase c, beginning of phase d.
// This phase continues until |index_into_window_| reaches |window_size_|, at
// which point the window restarts.
- int intro_crossfade_begin = input_step - bytes_to_crossfade;
+ int intro_crossfade_begin = input_step - frames_in_crossfade_;
- // a) Output a raw frame if we haven't reached the crossfade section.
+ // a) Output raw frames if we haven't reached the crossfade section.
if (index_into_window_ < outtro_crossfade_begin) {
- CopyWithAdvance(dest);
- index_into_window_ += bytes_per_frame_;
- return true;
+ // Read as many frames as we can and return the count. If it's not enough,
+ // we will get called again.
+ const int frames_to_copy =
+ std::min(requested_frames, outtro_crossfade_begin - index_into_window_);
+ int copied = audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest);
+ index_into_window_ += copied;
+ return copied;
}
// b) Save outtro crossfade frames into intermediate buffer, but do not output
// anything to |dest|.
- while (index_into_window_ < outtro_crossfade_end) {
- if (audio_buffer_.forward_bytes() < bytes_per_frame_)
- return false;
-
+ if (index_into_window_ < outtro_crossfade_end) {
-    // This phase only applies if there are bytes to crossfade.
+    // This phase only applies if there are frames to crossfade.
- DCHECK_GT(bytes_to_crossfade, 0);
- uint8* place_to_copy = crossfade_buffer_.get() +
- (index_into_window_ - outtro_crossfade_begin);
- CopyWithAdvance(place_to_copy);
- index_into_window_ += bytes_per_frame_;
+ DCHECK_GT(frames_in_crossfade_, 0);
+ int crossfade_start = index_into_window_ - outtro_crossfade_begin;
+ int crossfade_count = outtro_crossfade_end - index_into_window_;
+ int copied = audio_buffer_.ReadFrames(
+ crossfade_count, crossfade_start, crossfade_buffer_.get());
+ index_into_window_ += copied;
+
+ // Did we get all the frames we need? If not, return and let subsequent
+ // calls try to get the rest.
+ if (copied != crossfade_count)
+ return 0;
}
// c) Drop frames until we reach the intro crossfade section.
- while (index_into_window_ < intro_crossfade_begin) {
- if (audio_buffer_.forward_bytes() < bytes_per_frame_)
- return false;
-
- DropFrame();
- index_into_window_ += bytes_per_frame_;
+ if (index_into_window_ < intro_crossfade_begin) {
+ // Check if there is enough data to skip all the frames needed. If not,
+ // return 0 and let subsequent calls try to skip it all.
+ int seek_frames = intro_crossfade_begin - index_into_window_;
+ if (audio_buffer_.frames() < seek_frames)
+ return 0;
+ audio_buffer_.SeekFrames(seek_frames);
+
+ // We've dropped all the frames that need to be dropped.
+ index_into_window_ += seek_frames;
}
- // Return if we have run out of data after Phase c).
- if (audio_buffer_.forward_bytes() < bytes_per_frame_)
- return false;
-
- // d) Crossfade and output a frame.
- DCHECK_GT(bytes_to_crossfade, 0);
+ // d) Crossfade and output a frame, as long as we have data.
+ if (audio_buffer_.frames() < 1)
+ return 0;
+ DCHECK_GT(frames_in_crossfade_, 0);
DCHECK_LT(index_into_window_, window_size_);
+
int offset_into_buffer = index_into_window_ - intro_crossfade_begin;
- memcpy(dest, crossfade_buffer_.get() + offset_into_buffer,
- bytes_per_frame_);
- scoped_ptr<uint8[]> intro_frame_ptr(new uint8[bytes_per_frame_]);
- audio_buffer_.Read(intro_frame_ptr.get(), bytes_per_frame_);
- OutputCrossfadedFrame(dest, intro_frame_ptr.get());
- index_into_window_ += bytes_per_frame_;
- return true;
+ int copied = audio_buffer_.ReadFrames(1, dest_offset, dest);
+ DCHECK_EQ(copied, 1);
+ CrossfadeFrame(crossfade_buffer_.get(),
+ offset_into_buffer,
+ dest,
+ dest_offset,
+ offset_into_buffer);
+ index_into_window_ += copied;
+ return copied;
}
-bool AudioRendererAlgorithm::OutputSlowerPlayback(uint8* dest,
- int input_step,
- int output_step) {
+int AudioRendererAlgorithm::OutputSlowerPlayback(AudioBus* dest,
+ int dest_offset,
+ int requested_frames,
+ int input_step,
+ int output_step) {
// Ensure we don't run into OOB read/write situation.
CHECK_LT(input_step, output_step);
DCHECK_LT(index_into_window_, window_size_);
DCHECK_LT(playback_rate_, 1.0);
- DCHECK_NE(playback_rate_, 0.0);
+ DCHECK_NE(playback_rate_, 0);
DCHECK(!muted_);
- if (audio_buffer_.forward_bytes() < bytes_per_frame_)
- return false;
+ if (audio_buffer_.frames() < 1)
+ return 0;
// The audio data is output in a series of windows. For slowed down playback,
// the window is comprised of the following phases:
@@ -246,11 +268,10 @@ bool AudioRendererAlgorithm::OutputSlowerPlayback(uint8* dest,
//
// The duration of each phase is computed below based on the |window_size_|
// and |playback_rate_|.
- int bytes_to_crossfade = bytes_in_crossfade_;
- DCHECK_LE(bytes_to_crossfade, input_step);
+ DCHECK_LE(frames_in_crossfade_, input_step);
// This is the index of the end of phase a, beginning of phase b.
- int intro_crossfade_begin = input_step - bytes_to_crossfade;
+ int intro_crossfade_begin = input_step - frames_in_crossfade_;
// This is the index of the end of phase b, beginning of phase c.
int intro_crossfade_end = input_step;
@@ -258,117 +279,74 @@ bool AudioRendererAlgorithm::OutputSlowerPlayback(uint8* dest,
// This is the index of the end of phase c, beginning of phase d.
// This phase continues until |index_into_window_| reaches |window_size_|, at
// which point the window restarts.
- int outtro_crossfade_begin = output_step - bytes_to_crossfade;
+ int outtro_crossfade_begin = output_step - frames_in_crossfade_;
- // a) Output a raw frame.
+ // a) Output raw frames.
if (index_into_window_ < intro_crossfade_begin) {
- CopyWithAdvance(dest);
- index_into_window_ += bytes_per_frame_;
- return true;
+ // Read as many frames as we can and return the count. If it's not enough,
+ // we will get called again.
+ const int frames_to_copy =
+ std::min(requested_frames, intro_crossfade_begin - index_into_window_);
+ int copied = audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest);
+ index_into_window_ += copied;
+ return copied;
}
- // b) Save the raw frame for the intro crossfade section, then output the
- // frame to |dest|.
+ // b) Save the raw frames for the intro crossfade section, then copy the
+ // same frames to |dest|.
if (index_into_window_ < intro_crossfade_end) {
+ const int frames_to_copy =
+ std::min(requested_frames, intro_crossfade_end - index_into_window_);
int offset = index_into_window_ - intro_crossfade_begin;
- uint8* place_to_copy = crossfade_buffer_.get() + offset;
- CopyWithoutAdvance(place_to_copy);
- CopyWithAdvance(dest);
- index_into_window_ += bytes_per_frame_;
- return true;
+ int copied = audio_buffer_.ReadFrames(
+ frames_to_copy, offset, crossfade_buffer_.get());
+ crossfade_buffer_->CopyPartialFramesTo(offset, copied, dest_offset, dest);
+ index_into_window_ += copied;
+ return copied;
}
- int audio_buffer_offset = index_into_window_ - intro_crossfade_end;
-
- if (audio_buffer_.forward_bytes() < audio_buffer_offset + bytes_per_frame_)
- return false;
-
// c) Output a raw frame into |dest| without advancing the |audio_buffer_|
- // cursor. See function-level comment.
- DCHECK_GE(index_into_window_, intro_crossfade_end);
- CopyWithoutAdvance(dest, audio_buffer_offset);
+ // cursor.
+ int audio_buffer_offset = index_into_window_ - intro_crossfade_end;
+ DCHECK_GE(audio_buffer_offset, 0);
+ if (audio_buffer_.frames() <= audio_buffer_offset)
+ return 0;
+ int copied =
+ audio_buffer_.PeekFrames(1, audio_buffer_offset, dest_offset, dest);
+ DCHECK_EQ(1, copied);
// d) Crossfade the next frame of |crossfade_buffer_| into |dest| if we've
// reached the outtro crossfade section of the window.
if (index_into_window_ >= outtro_crossfade_begin) {
int offset_into_crossfade_buffer =
index_into_window_ - outtro_crossfade_begin;
- uint8* intro_frame_ptr =
- crossfade_buffer_.get() + offset_into_crossfade_buffer;
- OutputCrossfadedFrame(dest, intro_frame_ptr);
+ CrossfadeFrame(dest,
+ dest_offset,
+ crossfade_buffer_.get(),
+ offset_into_crossfade_buffer,
+ offset_into_crossfade_buffer);
}
- index_into_window_ += bytes_per_frame_;
- return true;
-}
-
-bool AudioRendererAlgorithm::OutputNormalPlayback(uint8* dest) {
- if (audio_buffer_.forward_bytes() >= bytes_per_frame_) {
- CopyWithAdvance(dest);
- index_into_window_ += bytes_per_frame_;
- return true;
- }
- return false;
-}
-
-void AudioRendererAlgorithm::CopyWithAdvance(uint8* dest) {
- CopyWithoutAdvance(dest);
- DropFrame();
+ index_into_window_ += copied;
+ return copied;
}
-void AudioRendererAlgorithm::CopyWithoutAdvance(uint8* dest) {
- CopyWithoutAdvance(dest, 0);
-}
-
-void AudioRendererAlgorithm::CopyWithoutAdvance(
- uint8* dest, int offset) {
- DCHECK(!muted_);
- int copied = audio_buffer_.Peek(dest, bytes_per_frame_, offset);
- DCHECK_EQ(bytes_per_frame_, copied);
-}
-
-void AudioRendererAlgorithm::DropFrame() {
- audio_buffer_.Seek(bytes_per_frame_);
-}
-
-void AudioRendererAlgorithm::OutputCrossfadedFrame(
- uint8* outtro, const uint8* intro) {
- DCHECK_LE(index_into_window_, window_size_);
- DCHECK(!muted_);
-
- switch (bytes_per_channel_) {
- case 4:
- CrossfadeFrame<int32>(outtro, intro);
- break;
- case 2:
- CrossfadeFrame<int16>(outtro, intro);
- break;
- case 1:
- CrossfadeFrame<uint8>(outtro, intro);
- break;
- default:
- NOTREACHED() << "Unsupported audio bit depth in crossfade.";
- }
-}
-
-template <class Type>
-void AudioRendererAlgorithm::CrossfadeFrame(
- uint8* outtro_bytes, const uint8* intro_bytes) {
- Type* outtro = reinterpret_cast<Type*>(outtro_bytes);
- const Type* intro = reinterpret_cast<const Type*>(intro_bytes);
-
- int frames_in_crossfade = bytes_in_crossfade_ / bytes_per_frame_;
+void AudioRendererAlgorithm::CrossfadeFrame(AudioBus* intro,
+ int intro_offset,
+ AudioBus* outtro,
+ int outtro_offset,
+ int fade_offset) {
float crossfade_ratio =
- static_cast<float>(crossfade_frame_number_) / frames_in_crossfade;
+ static_cast<float>(fade_offset) / frames_in_crossfade_;
for (int channel = 0; channel < channels_; ++channel) {
- *outtro *= 1.0 - crossfade_ratio;
- *outtro++ += (*intro++) * crossfade_ratio;
+ outtro->channel(channel)[outtro_offset] =
+ (1.0f - crossfade_ratio) * intro->channel(channel)[intro_offset] +
+ (crossfade_ratio) * outtro->channel(channel)[outtro_offset];
}
- crossfade_frame_number_++;
}
void AudioRendererAlgorithm::SetPlaybackRate(float new_rate) {
- DCHECK_GE(new_rate, 0.0);
+ DCHECK_GE(new_rate, 0);
playback_rate_ = new_rate;
muted_ =
playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate;
@@ -376,10 +354,6 @@ void AudioRendererAlgorithm::SetPlaybackRate(float new_rate) {
ResetWindow();
}
-void AudioRendererAlgorithm::AlignToFrameBoundary(int* value) {
- (*value) -= ((*value) % bytes_per_frame_);
-}
-
void AudioRendererAlgorithm::FlushBuffers() {
ResetWindow();
@@ -392,22 +366,17 @@ base::TimeDelta AudioRendererAlgorithm::GetTime() {
}
void AudioRendererAlgorithm::EnqueueBuffer(
- const scoped_refptr<DataBuffer>& buffer_in) {
+ const scoped_refptr<AudioBuffer>& buffer_in) {
DCHECK(!buffer_in->end_of_stream());
audio_buffer_.Append(buffer_in);
}
bool AudioRendererAlgorithm::IsQueueFull() {
- return audio_buffer_.forward_bytes() >= audio_buffer_.forward_capacity();
-}
-
-int AudioRendererAlgorithm::QueueCapacity() {
- return audio_buffer_.forward_capacity();
+ return audio_buffer_.frames() >= capacity_;
}
void AudioRendererAlgorithm::IncreaseQueueCapacity() {
- audio_buffer_.set_forward_capacity(
- std::min(2 * audio_buffer_.forward_capacity(), kMaxBufferSizeInBytes));
+ capacity_ = std::min(2 * capacity_, kMaxBufferSizeInFrames);
}
} // namespace media
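Isolated from the class, the muted-path bookkeeping in FillBuffer() reduces
to the sketch below; the function name and pointer parameter are
illustrative, not part of the patch:

  // Returns how many whole source frames to skip after rendering
  // |frames_rendered| frames of silence at |playback_rate|, carrying the
  // fractional remainder in |*muted_partial_frame| for the next call.
  int MutedFramesToSkip(int frames_rendered,
                        float playback_rate,
                        double* muted_partial_frame) {
    *muted_partial_frame += frames_rendered * playback_rate;
    int whole_frames = static_cast<int>(*muted_partial_frame);
    *muted_partial_frame -= whole_frames;
    return whole_frames;
  }

For example, three calls of 3 frames each at rate 1.25 accumulate 3.75, 4.5
and 4.25, skipping 3, 4 and 4 frames while carrying 0.75, 0.5 and 0.25
forward, so the source position always stays within one frame of ideal.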
diff --git a/media/filters/audio_renderer_algorithm.h b/media/filters/audio_renderer_algorithm.h
index a287b5c..26790b9 100644
--- a/media/filters/audio_renderer_algorithm.h
+++ b/media/filters/audio_renderer_algorithm.h
@@ -24,11 +24,12 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "media/audio/audio_parameters.h"
-#include "media/base/seekable_buffer.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_buffer_queue.h"
namespace media {
-class DataBuffer;
+class AudioBus;
class MEDIA_EXPORT AudioRendererAlgorithm {
public:
@@ -44,9 +45,9 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
//
// Data from |audio_buffer_| is consumed in proportion to the playback rate.
//
- // Returns the number of frames copied into |dest|.
- // May request more reads via |request_read_cb_| before returning.
- int FillBuffer(uint8* dest, int requested_frames);
+ // Returns the number of frames copied into |dest|. May request more reads via
+ // |request_read_cb_| before returning.
+ int FillBuffer(AudioBus* dest, int requested_frames);
// Clears |audio_buffer_|.
void FlushBuffers();
@@ -57,7 +58,7 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// Enqueues a buffer. It is called from the owner of the algorithm after a
// read completes.
- void EnqueueBuffer(const scoped_refptr<DataBuffer>& buffer_in);
+ void EnqueueBuffer(const scoped_refptr<AudioBuffer>& buffer_in);
float playback_rate() const { return playback_rate_; }
void SetPlaybackRate(float new_rate);
@@ -65,42 +66,42 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// Returns true if |audio_buffer_| is at or exceeds capacity.
bool IsQueueFull();
- // Returns the capacity of |audio_buffer_|.
- int QueueCapacity();
+ // Returns the capacity of |audio_buffer_| in frames.
+ int QueueCapacity() const { return capacity_; }
// Increase the capacity of |audio_buffer_| if possible.
void IncreaseQueueCapacity();
- // Returns the number of bytes left in |audio_buffer_|, which may be larger
+ // Returns the number of frames left in |audio_buffer_|, which may be larger
// than QueueCapacity() in the event that EnqueueBuffer() delivered more data
// than |audio_buffer_| was intending to hold.
- int bytes_buffered() { return audio_buffer_.forward_bytes(); }
-
- int bytes_per_frame() { return bytes_per_frame_; }
-
- int bytes_per_channel() { return bytes_per_channel_; }
+ int frames_buffered() { return audio_buffer_.frames(); }
+ // Returns the samples per second for this audio stream.
int samples_per_second() { return samples_per_second_; }
+ // Is the sound currently muted?
bool is_muted() { return muted_; }
private:
- // Fills |dest| with one frame of audio data at normal speed. Returns true if
- // a frame was rendered, false otherwise.
- bool OutputNormalPlayback(uint8* dest);
-
- // Fills |dest| with one frame of audio data at faster than normal speed.
- // Returns true if a frame was rendered, false otherwise.
+ // Fills |dest| with up to |requested_frames| frames of audio data at faster
+ // than normal speed. Returns the number of frames inserted into |dest|. If
+  // not enough data is available, returns 0.
//
// When the audio playback is > 1.0, we use a variant of Overlap-Add to squish
// audio output while preserving pitch. Essentially, we play a bit of audio
// data at normal speed, then we "fast forward" by dropping the next bit of
// audio data, and then we stich the pieces together by crossfading from one
// audio chunk to the next.
- bool OutputFasterPlayback(uint8* dest, int input_step, int output_step);
-
- // Fills |dest| with one frame of audio data at slower than normal speed.
- // Returns true if a frame was rendered, false otherwise.
+ int OutputFasterPlayback(AudioBus* dest,
+ int dest_offset,
+ int requested_frames,
+ int input_step,
+ int output_step);
+
+ // Fills |dest| with up to |requested_frames| frames of audio data at slower
+ // than normal speed. Returns the number of frames inserted into |dest|. If
+  // not enough data is available, returns 0.
//
// When the audio playback is < 1.0, we use a variant of Overlap-Add to
// stretch audio output while preserving pitch. This works by outputting a
@@ -108,32 +109,21 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// by repeating some of the audio data from the previous audio segment.
-  // Segments are stiched together by crossfading from one audio chunk to the
+  // Segments are stitched together by crossfading from one audio chunk to the
// next.
- bool OutputSlowerPlayback(uint8* dest, int input_step, int output_step);
+ int OutputSlowerPlayback(AudioBus* dest,
+ int dest_offset,
+ int requested_frames,
+ int input_step,
+ int output_step);
// Resets the window state to the start of a new window.
void ResetWindow();
- // Copies a raw frame from |audio_buffer_| into |dest| without progressing
- // |audio_buffer_|'s internal "current" cursor. Optionally peeks at a forward
- // byte |offset|.
- void CopyWithoutAdvance(uint8* dest);
- void CopyWithoutAdvance(uint8* dest, int offset);
-
- // Copies a raw frame from |audio_buffer_| into |dest| and progresses the
- // |audio_buffer_| forward.
- void CopyWithAdvance(uint8* dest);
-
- // Moves the |audio_buffer_| forward by one frame.
- void DropFrame();
-
// Does a linear crossfade from |intro| into |outtro| for one frame.
- // Assumes pointers are valid and are at least size of |bytes_per_frame_|.
- void OutputCrossfadedFrame(uint8* outtro, const uint8* intro);
- template <class Type>
- void CrossfadeFrame(uint8* outtro, const uint8* intro);
-
- // Rounds |*value| down to the nearest frame boundary.
- void AlignToFrameBoundary(int* value);
+ void CrossfadeFrame(AudioBus* intro,
+ int intro_offset,
+ AudioBus* outtro,
+ int outtro_offset,
+ int fade_offset);
// Number of channels in audio stream.
int channels_;
@@ -141,24 +131,18 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// Sample rate of audio stream.
int samples_per_second_;
- // Byte depth of audio.
- int bytes_per_channel_;
-
// Used by algorithm to scale output.
float playback_rate_;
// Buffered audio data.
- SeekableBuffer audio_buffer_;
+ AudioBufferQueue audio_buffer_;
- // Length for crossfade in bytes.
- int bytes_in_crossfade_;
-
- // Length of frame in bytes.
- int bytes_per_frame_;
+ // Length for crossfade in frames.
+ int frames_in_crossfade_;
// The current location in the audio window, between 0 and |window_size_|.
// When |index_into_window_| reaches |window_size_|, the window resets.
- // Indexed by byte.
+ // Indexed by frame.
int index_into_window_;
// The frame number in the crossfade.
@@ -167,12 +151,18 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// True if the audio should be muted.
bool muted_;
+ // If muted, keep track of partial frames that should have been skipped over.
+ double muted_partial_frame_;
+
// Temporary buffer to hold crossfade data.
- scoped_ptr<uint8[]> crossfade_buffer_;
+ scoped_ptr<AudioBus> crossfade_buffer_;
- // Window size, in bytes (calculated from audio properties).
+ // Window size, in frames (calculated from audio properties).
int window_size_;
+ // How many frames to have in the queue before we report the queue is full.
+ int capacity_;
+
DISALLOW_COPY_AND_ASSIGN(AudioRendererAlgorithm);
};
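A caller-side sketch of the AudioBus-based interface above, assuming |params|
and |decoded_buffer| are supplied by the renderer (the 512-frame request is
illustrative):

  AudioRendererAlgorithm algorithm;
  algorithm.Initialize(1.0f, params);
  algorithm.EnqueueBuffer(decoded_buffer);  // scoped_refptr<AudioBuffer>
  scoped_ptr<AudioBus> bus = AudioBus::Create(params.channels(), 512);
  int filled = algorithm.FillBuffer(bus.get(), 512);
  // |filled| may be less than 512 if the queue runs dry; the renderer tops
  // up via EnqueueBuffer() and calls FillBuffer() again.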
diff --git a/media/filters/audio_renderer_algorithm_unittest.cc b/media/filters/audio_renderer_algorithm_unittest.cc
index 31a6ce7..d5119c0 100644
--- a/media/filters/audio_renderer_algorithm_unittest.cc
+++ b/media/filters/audio_renderer_algorithm_unittest.cc
@@ -12,69 +12,106 @@
#include "base/bind.h"
#include "base/callback.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
+#include "media/base/buffers.h"
#include "media/base/channel_layout.h"
-#include "media/base/data_buffer.h"
+#include "media/base/test_helpers.h"
#include "media/filters/audio_renderer_algorithm.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
-static const size_t kRawDataSize = 2048;
+static const int kFrameSize = 250;
static const int kSamplesPerSecond = 3000;
-static const ChannelLayout kDefaultChannelLayout = CHANNEL_LAYOUT_STEREO;
-static const int kDefaultSampleBits = 16;
+static const SampleFormat kSampleFormat = kSampleFormatS16;
class AudioRendererAlgorithmTest : public testing::Test {
public:
AudioRendererAlgorithmTest()
- : bytes_enqueued_(0) {
+ : frames_enqueued_(0),
+ channels_(0),
+ sample_format_(kUnknownSampleFormat),
+ bytes_per_sample_(0) {
}
virtual ~AudioRendererAlgorithmTest() {}
void Initialize() {
- Initialize(kDefaultChannelLayout, kDefaultSampleBits, kSamplesPerSecond);
+ Initialize(CHANNEL_LAYOUT_STEREO, kSampleFormat, kSamplesPerSecond);
}
- void Initialize(ChannelLayout channel_layout, int bits_per_channel,
+ void Initialize(ChannelLayout channel_layout,
+ SampleFormat sample_format,
int samples_per_second) {
- AudioParameters params(
- media::AudioParameters::AUDIO_PCM_LINEAR, channel_layout,
- samples_per_second, bits_per_channel, samples_per_second / 100);
-
+ channels_ = ChannelLayoutToChannelCount(channel_layout);
+ sample_format_ = sample_format;
+ bytes_per_sample_ = SampleFormatToBytesPerChannel(sample_format);
+ AudioParameters params(media::AudioParameters::AUDIO_PCM_LINEAR,
+ channel_layout,
+ samples_per_second,
+ bytes_per_sample_ * 8,
+ samples_per_second / 100);
algorithm_.Initialize(1, params);
FillAlgorithmQueue();
}
void FillAlgorithmQueue() {
+ // The value of the data is meaningless; we just want non-zero data to
+ // differentiate it from muted data.
+ scoped_refptr<AudioBuffer> buffer;
while (!algorithm_.IsQueueFull()) {
- scoped_ptr<uint8[]> audio_data(new uint8[kRawDataSize]);
- CHECK_EQ(kRawDataSize % algorithm_.bytes_per_channel(), 0u);
- CHECK_EQ(kRawDataSize % algorithm_.bytes_per_frame(), 0u);
- // The value of the data is meaningless; we just want non-zero data to
- // differentiate it from muted data.
- memset(audio_data.get(), 1, kRawDataSize);
- algorithm_.EnqueueBuffer(new DataBuffer(audio_data.Pass(), kRawDataSize));
- bytes_enqueued_ += kRawDataSize;
+ switch (sample_format_) {
+ case kSampleFormatU8:
+ buffer = MakeInterleavedAudioBuffer<uint8>(sample_format_,
+ channels_,
+ 1,
+ 1,
+ kFrameSize,
+ kNoTimestamp(),
+ kNoTimestamp());
+ break;
+ case kSampleFormatS16:
+ buffer = MakeInterleavedAudioBuffer<int16>(sample_format_,
+ channels_,
+ 1,
+ 1,
+ kFrameSize,
+ kNoTimestamp(),
+ kNoTimestamp());
+ break;
+ case kSampleFormatS32:
+ buffer = MakeInterleavedAudioBuffer<int32>(sample_format_,
+ channels_,
+ 1,
+ 1,
+ kFrameSize,
+ kNoTimestamp(),
+ kNoTimestamp());
+ break;
+ default:
+ NOTREACHED() << "Unrecognized format " << sample_format_;
+ }
+ algorithm_.EnqueueBuffer(buffer);
+ frames_enqueued_ += kFrameSize;
}
}
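The (1, 1) arguments above are read here as the start value and per-sample increment of the MakeInterleavedAudioBuffer<T> helper added to test_helpers.h in this CL, so every generated sample is non-zero and thus distinguishable from muted output.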
- void CheckFakeData(uint8* audio_data, int frames_written) {
- int sum = 0;
- for (int i = 0; i < frames_written * algorithm_.bytes_per_frame(); ++i)
- sum |= audio_data[i];
-
- if (algorithm_.is_muted())
- ASSERT_EQ(sum, 0);
- else
- ASSERT_NE(sum, 0);
+ void CheckFakeData(AudioBus* audio_data, int frames_written) {
+ // Check each channel individually.
+ for (int ch = 0; ch < channels_; ++ch) {
+ bool all_zero = true;
+ for (int i = 0; i < frames_written && all_zero; ++i)
+ all_zero = audio_data->channel(ch)[i] == 0.0f;
+ ASSERT_EQ(algorithm_.is_muted(), all_zero) << " for channel " << ch;
+ }
}
- int ComputeConsumedBytes(int initial_bytes_enqueued,
- int initial_bytes_buffered) {
- int byte_delta = bytes_enqueued_ - initial_bytes_enqueued;
- int buffered_delta = algorithm_.bytes_buffered() - initial_bytes_buffered;
- int consumed = byte_delta - buffered_delta;
+ int ComputeConsumedFrames(int initial_frames_enqueued,
+ int initial_frames_buffered) {
+ int frame_delta = frames_enqueued_ - initial_frames_enqueued;
+ int buffered_delta = algorithm_.frames_buffered() - initial_frames_buffered;
+ int consumed = frame_delta - buffered_delta;
CHECK_GE(consumed, 0);
return consumed;
}
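For example, if 1000 frames have been enqueued since the test began and the algorithm's buffered count grew by 200 over the same span, then playback consumed 1000 - 200 = 800 frames.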
@@ -83,24 +120,22 @@ class AudioRendererAlgorithmTest : public testing::Test {
const int kDefaultBufferSize = algorithm_.samples_per_second() / 100;
const int kDefaultFramesRequested = 2 * algorithm_.samples_per_second();
- TestPlaybackRate(playback_rate, kDefaultBufferSize,
- kDefaultFramesRequested);
+ TestPlaybackRate(
+ playback_rate, kDefaultBufferSize, kDefaultFramesRequested);
}
void TestPlaybackRate(double playback_rate,
int buffer_size_in_frames,
int total_frames_requested) {
- int initial_bytes_enqueued = bytes_enqueued_;
- int initial_bytes_buffered = algorithm_.bytes_buffered();
-
+ int initial_frames_enqueued = frames_enqueued_;
+ int initial_frames_buffered = algorithm_.frames_buffered();
algorithm_.SetPlaybackRate(static_cast<float>(playback_rate));
- scoped_ptr<uint8[]> buffer(
- new uint8[buffer_size_in_frames * algorithm_.bytes_per_frame()]);
-
+ scoped_ptr<AudioBus> bus =
+ AudioBus::Create(channels_, buffer_size_in_frames);
if (playback_rate == 0.0) {
int frames_written =
- algorithm_.FillBuffer(buffer.get(), buffer_size_in_frames);
+ algorithm_.FillBuffer(bus.get(), buffer_size_in_frames);
EXPECT_EQ(0, frames_written);
return;
}
@@ -108,23 +143,22 @@ class AudioRendererAlgorithmTest : public testing::Test {
int frames_remaining = total_frames_requested;
while (frames_remaining > 0) {
int frames_requested = std::min(buffer_size_in_frames, frames_remaining);
- int frames_written =
- algorithm_.FillBuffer(buffer.get(), frames_requested);
- ASSERT_GT(frames_written, 0);
- CheckFakeData(buffer.get(), frames_written);
+ int frames_written = algorithm_.FillBuffer(bus.get(), frames_requested);
+ ASSERT_GT(frames_written, 0) << "Requested: " << frames_requested
+ << ", playing at " << playback_rate;
+ CheckFakeData(bus.get(), frames_written);
frames_remaining -= frames_written;
FillAlgorithmQueue();
}
- int bytes_requested = total_frames_requested * algorithm_.bytes_per_frame();
- int bytes_consumed = ComputeConsumedBytes(initial_bytes_enqueued,
- initial_bytes_buffered);
+ int frames_consumed =
+ ComputeConsumedFrames(initial_frames_enqueued, initial_frames_buffered);
// If playing back at normal speed, we should always get back the same
// number of frames requested.
if (playback_rate == 1.0) {
- EXPECT_EQ(bytes_requested, bytes_consumed);
+ EXPECT_EQ(total_frames_requested, frames_consumed);
return;
}
@@ -136,19 +170,17 @@ class AudioRendererAlgorithmTest : public testing::Test {
// down playback, and for playback_rate > 1, playback rate generally gets
// less and less accurate the farther it drifts from 1 (though this is
// nonlinear).
- static const double kMaxAcceptableDelta = 0.01;
- double actual_playback_rate = 1.0 * bytes_consumed / bytes_requested;
-
- // Calculate the percentage difference from the target |playback_rate| as a
- // fraction from 0.0 to 1.0.
- double delta = std::abs(1.0 - (actual_playback_rate / playback_rate));
-
- EXPECT_LE(delta, kMaxAcceptableDelta);
+ double actual_playback_rate =
+ 1.0 * frames_consumed / total_frames_requested;
+ EXPECT_NEAR(playback_rate, actual_playback_rate, playback_rate / 100.0);
}
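As a worked example: at playback_rate 0.5 with 6000 frames requested, roughly 3000 input frames should be consumed, giving actual_playback_rate = 3000 / 6000 = 0.5; the EXPECT_NEAR bound of playback_rate / 100 tolerates 1% relative drift, i.e. any measured rate in [0.495, 0.505].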
protected:
AudioRendererAlgorithm algorithm_;
- int bytes_enqueued_;
+ int frames_enqueued_;
+ int channels_;
+ SampleFormat sample_format_;
+ int bytes_per_sample_;
};
TEST_F(AudioRendererAlgorithmTest, FillBuffer_NormalRate) {
@@ -245,25 +277,21 @@ TEST_F(AudioRendererAlgorithmTest, FillBuffer_SmallBufferSize) {
}
TEST_F(AudioRendererAlgorithmTest, FillBuffer_LargeBufferSize) {
- Initialize(kDefaultChannelLayout, kDefaultSampleBits, 44100);
+ Initialize(CHANNEL_LAYOUT_STEREO, kSampleFormatS16, 44100);
TestPlaybackRate(1.0);
TestPlaybackRate(0.5);
TestPlaybackRate(1.5);
}
TEST_F(AudioRendererAlgorithmTest, FillBuffer_LowerQualityAudio) {
- static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_MONO;
- static const int kSampleBits = 8;
- Initialize(kChannelLayout, kSampleBits, kSamplesPerSecond);
+ Initialize(CHANNEL_LAYOUT_MONO, kSampleFormatU8, kSamplesPerSecond);
TestPlaybackRate(1.0);
TestPlaybackRate(0.5);
TestPlaybackRate(1.5);
}
TEST_F(AudioRendererAlgorithmTest, FillBuffer_HigherQualityAudio) {
- static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
- static const int kSampleBits = 32;
- Initialize(kChannelLayout, kSampleBits, kSamplesPerSecond);
+ Initialize(CHANNEL_LAYOUT_STEREO, kSampleFormatS32, kSamplesPerSecond);
TestPlaybackRate(1.0);
TestPlaybackRate(0.5);
TestPlaybackRate(1.5);
diff --git a/media/filters/audio_renderer_impl.cc b/media/filters/audio_renderer_impl.cc
index 8a309cb..bcf3cb7 100644
--- a/media/filters/audio_renderer_impl.cc
+++ b/media/filters/audio_renderer_impl.cc
@@ -15,9 +15,9 @@
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
#include "media/audio/audio_util.h"
+#include "media/base/audio_buffer.h"
#include "media/base/audio_splicer.h"
#include "media/base/bind_to_loop.h"
-#include "media/base/data_buffer.h"
#include "media/base/demuxer_stream.h"
#include "media/filters/audio_decoder_selector.h"
#include "media/filters/decrypting_demuxer_stream.h"
@@ -59,8 +59,7 @@ AudioRendererImpl::AudioRendererImpl(
current_time_(kNoTimestamp()),
underflow_disabled_(false),
increase_preroll_on_underflow_(increase_preroll_on_underflow),
- preroll_aborted_(false),
- actual_frames_per_buffer_(0) {
+ preroll_aborted_(false) {
}
AudioRendererImpl::~AudioRendererImpl() {
@@ -263,9 +262,7 @@ void AudioRendererImpl::OnDecoderSelected(
return;
}
- int channels = ChannelLayoutToChannelCount(decoder_->channel_layout());
- int bytes_per_frame = channels * decoder_->bits_per_channel() / 8;
- splicer_.reset(new AudioSplicer(bytes_per_frame, sample_rate));
+ splicer_.reset(new AudioSplicer(sample_rate));
// We're all good! Continue initializing the rest of the audio renderer based
// on the decoder format.
@@ -311,7 +308,7 @@ void AudioRendererImpl::SetVolume(float volume) {
void AudioRendererImpl::DecodedAudioReady(
AudioDecoder::Status status,
- const scoped_refptr<DataBuffer>& buffer) {
+ const scoped_refptr<AudioBuffer>& buffer) {
DCHECK(message_loop_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
@@ -355,7 +352,7 @@ void AudioRendererImpl::DecodedAudioReady(
}
bool AudioRendererImpl::HandleSplicerBuffer(
- const scoped_refptr<DataBuffer>& buffer) {
+ const scoped_refptr<AudioBuffer>& buffer) {
if (buffer->end_of_stream()) {
received_end_of_stream_ = true;
@@ -454,32 +451,20 @@ void AudioRendererImpl::SetPlaybackRate(float playback_rate) {
}
bool AudioRendererImpl::IsBeforePrerollTime(
- const scoped_refptr<DataBuffer>& buffer) {
+ const scoped_refptr<AudioBuffer>& buffer) {
return (state_ == kPrerolling) && buffer.get() && !buffer->end_of_stream() &&
(buffer->timestamp() + buffer->duration()) < preroll_timestamp_;
}
int AudioRendererImpl::Render(AudioBus* audio_bus,
int audio_delay_milliseconds) {
- if (actual_frames_per_buffer_ != audio_bus->frames()) {
- audio_buffer_.reset(
- new uint8[audio_bus->frames() * audio_parameters_.GetBytesPerFrame()]);
- actual_frames_per_buffer_ = audio_bus->frames();
- }
-
- int frames_filled = FillBuffer(
- audio_buffer_.get(), audio_bus->frames(), audio_delay_milliseconds);
- DCHECK_LE(frames_filled, actual_frames_per_buffer_);
-
- // Deinterleave audio data into the output bus.
- audio_bus->FromInterleaved(
- audio_buffer_.get(), frames_filled,
- audio_parameters_.bits_per_sample() / 8);
-
+ int frames_filled =
+ FillBuffer(audio_bus, audio_bus->frames(), audio_delay_milliseconds);
+ DCHECK_LE(frames_filled, audio_bus->frames());
return frames_filled;
}
-uint32 AudioRendererImpl::FillBuffer(uint8* dest,
+uint32 AudioRendererImpl::FillBuffer(AudioBus* dest,
uint32 requested_frames,
int audio_delay_milliseconds) {
base::TimeDelta current_time = kNoTimestamp();
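The Render() simplification above works because AudioBus already stores planar float data, one contiguous array per channel, so the algorithm can write into it directly instead of round-tripping through an interleaved byte buffer. A minimal usage sketch (channel and frame counts are illustrative):

scoped_ptr<AudioBus> bus = AudioBus::Create(2, 1024);  // channels, frames
for (int ch = 0; ch < bus->channels(); ++ch) {
  float* samples = bus->channel(ch);  // planar data for channel |ch|
  for (int i = 0; i < bus->frames(); ++i)
    samples[i] = 0.0f;  // e.g. fill with silence
}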
diff --git a/media/filters/audio_renderer_impl.h b/media/filters/audio_renderer_impl.h
index 43c106d..56501fd 100644
--- a/media/filters/audio_renderer_impl.h
+++ b/media/filters/audio_renderer_impl.h
@@ -36,6 +36,7 @@ class MessageLoopProxy;
namespace media {
+class AudioBus;
class AudioDecoderSelector;
class AudioSplicer;
class DecryptingDemuxerStream;
@@ -98,11 +99,11 @@ class MEDIA_EXPORT AudioRendererImpl
// Callback from the audio decoder delivering decoded audio samples.
void DecodedAudioReady(AudioDecoder::Status status,
- const scoped_refptr<DataBuffer>& buffer);
+ const scoped_refptr<AudioBuffer>& buffer);
// Handles buffers that come out of |splicer_|.
// Returns true if more buffers are needed.
- bool HandleSplicerBuffer(const scoped_refptr<DataBuffer>& buffer);
+ bool HandleSplicerBuffer(const scoped_refptr<AudioBuffer>& buffer);
// Helper functions for AudioDecoder::Status values passed to
// DecodedAudioReady().
@@ -125,7 +126,7 @@ class MEDIA_EXPORT AudioRendererImpl
// should the filled buffer be played.
//
// Safe to call on any thread.
- uint32 FillBuffer(uint8* dest,
+ uint32 FillBuffer(AudioBus* dest,
uint32 requested_frames,
int audio_delay_milliseconds);
@@ -155,7 +156,7 @@ class MEDIA_EXPORT AudioRendererImpl
// Returns true if the data in the buffer is all before
// |preroll_timestamp_|. This can only return true while
// in the kPrerolling state.
- bool IsBeforePrerollTime(const scoped_refptr<DataBuffer>& buffer);
+ bool IsBeforePrerollTime(const scoped_refptr<AudioBuffer>& buffer);
// Called when |decoder_selector_| has selected |decoder| or is null if no
// decoder could be selected.
@@ -268,10 +269,6 @@ class MEDIA_EXPORT AudioRendererImpl
// End variables which must be accessed under |lock_|. ----------------------
- // Variables used only on the audio thread. ---------------------------------
- int actual_frames_per_buffer_;
- scoped_ptr<uint8[]> audio_buffer_;
-
DISALLOW_COPY_AND_ASSIGN(AudioRendererImpl);
};
diff --git a/media/filters/audio_renderer_impl_unittest.cc b/media/filters/audio_renderer_impl_unittest.cc
index 3298062..ccf60f5 100644
--- a/media/filters/audio_renderer_impl_unittest.cc
+++ b/media/filters/audio_renderer_impl_unittest.cc
@@ -9,8 +9,8 @@
#include "base/message_loop.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
+#include "media/base/audio_buffer.h"
#include "media/base/audio_timestamp_helper.h"
-#include "media/base/data_buffer.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_audio_renderer_sink.h"
#include "media/base/mock_filters.h"
@@ -30,10 +30,17 @@ using ::testing::StrictMock;
namespace media {
+// Constants to specify the type of audio data used.
+static AudioCodec kCodec = kCodecVorbis;
+static SampleFormat kSampleFormat = kSampleFormatPlanarF32;
+static ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+static int kChannels = ChannelLayoutToChannelCount(kChannelLayout);
+static int kSamplesPerSecond = 44100;
+
// Constants for distinguishing between muted audio and playing audio when using
-// ConsumeBufferedData().
-static uint8 kMutedAudio = 0x00;
-static uint8 kPlayingAudio = 0x99;
+// ConsumeBufferedData(). Must match the type needed by kSampleFormat.
+static float kMutedAudio = 0.0f;
+static float kPlayingAudio = 0.5f;
class AudioRendererImplTest : public ::testing::Test {
public:
@@ -41,8 +48,13 @@ class AudioRendererImplTest : public ::testing::Test {
AudioRendererImplTest()
: demuxer_stream_(DemuxerStream::AUDIO),
decoder_(new MockAudioDecoder()) {
- AudioDecoderConfig audio_config(kCodecVorbis, kSampleFormatPlanarF32,
- CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, false);
+ AudioDecoderConfig audio_config(kCodec,
+ kSampleFormat,
+ kChannelLayout,
+ kSamplesPerSecond,
+ NULL,
+ 0,
+ false);
demuxer_stream_.set_audio_decoder_config(audio_config);
// Used to save callbacks and run them at a later time.
@@ -53,7 +65,7 @@ class AudioRendererImplTest : public ::testing::Test {
EXPECT_CALL(*decoder_, bits_per_channel())
.WillRepeatedly(Return(audio_config.bits_per_channel()));
EXPECT_CALL(*decoder_, channel_layout())
- .WillRepeatedly(Return(CHANNEL_LAYOUT_MONO));
+ .WillRepeatedly(Return(audio_config.channel_layout()));
EXPECT_CALL(*decoder_, samples_per_second())
.WillRepeatedly(Return(audio_config.samples_per_second()));
@@ -110,10 +122,8 @@ class AudioRendererImplTest : public ::testing::Test {
InitializeWithStatus(PIPELINE_OK);
- int channels = ChannelLayoutToChannelCount(decoder_->channel_layout());
- int bytes_per_frame = decoder_->bits_per_channel() * channels / 8;
- next_timestamp_.reset(new AudioTimestampHelper(
- bytes_per_frame, decoder_->samples_per_second()));
+ next_timestamp_.reset(
+ new AudioTimestampHelper(decoder_->samples_per_second()));
}
void InitializeWithStatus(PipelineStatus expected) {
@@ -189,16 +199,19 @@ class AudioRendererImplTest : public ::testing::Test {
DCHECK(wait_for_pending_read_cb_.is_null());
}
- // Delivers |size| bytes with value kPlayingAudio to |renderer_|.
+ // Delivers |size| frames with value kPlayingAudio to |renderer_|.
void SatisfyPendingRead(size_t size) {
CHECK(!read_cb_.is_null());
- scoped_refptr<DataBuffer> buffer = new DataBuffer(size);
- buffer->set_data_size(size);
- memset(buffer->writable_data(), kPlayingAudio, buffer->data_size());
- buffer->set_timestamp(next_timestamp_->GetTimestamp());
- buffer->set_duration(next_timestamp_->GetDuration(buffer->data_size()));
- next_timestamp_->AddBytes(buffer->data_size());
+ scoped_refptr<AudioBuffer> buffer =
+ MakePlanarAudioBuffer<float>(kSampleFormat,
+ kChannels,
+ kPlayingAudio,
+ 0.0f,
+ size,
+ next_timestamp_->GetTimestamp(),
+ next_timestamp_->GetFrameDuration(size));
+ next_timestamp_->AddFrames(size);
DeliverBuffer(AudioDecoder::kOk, buffer);
}
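The timestamp bookkeeping above uses the frame-based AudioTimestampHelper this CL introduces: the constructor now takes only the sample rate, and durations are derived from frame counts. A short sketch of the expected arithmetic:

AudioTimestampHelper helper(44100);          // sample rate only
helper.SetBaseTimestamp(base::TimeDelta());  // stream starts at zero
helper.AddFrames(4410);                      // 4410 frames at 44.1 kHz
// helper.GetTimestamp() now reports 100 ms, and
// helper.GetFrameDuration(441) reports 10 ms.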
@@ -208,31 +221,28 @@ class AudioRendererImplTest : public ::testing::Test {
}
void DeliverEndOfStream() {
- DeliverBuffer(AudioDecoder::kOk, DataBuffer::CreateEOSBuffer());
+ DeliverBuffer(AudioDecoder::kOk, AudioBuffer::CreateEOSBuffer());
}
- // Delivers bytes until |renderer_|'s internal buffer is full and no longer
+ // Delivers frames until |renderer_|'s internal buffer is full and no longer
// has pending reads.
void DeliverRemainingAudio() {
- SatisfyPendingRead(bytes_remaining_in_buffer());
+ SatisfyPendingRead(frames_remaining_in_buffer());
}
- // Attempts to consume |size| bytes from |renderer_|'s internal buffer,
- // returning true if all |size| bytes were consumed, false if less than
- // |size| bytes were consumed.
+ // Attempts to consume |requested_frames| frames from |renderer_|'s internal
+ // buffer, returning true if all |requested_frames| frames were consumed,
+ // false if fewer than |requested_frames| frames were consumed.
//
- // |muted| is optional and if passed will get set if the byte value of
+ // |muted| is optional and if passed will be set to true if the value of
// the consumed data is muted audio.
- bool ConsumeBufferedData(uint32 size, bool* muted) {
- scoped_ptr<uint8[]> buffer(new uint8[size]);
- uint32 bytes_per_frame = (decoder_->bits_per_channel() / 8) *
- ChannelLayoutToChannelCount(decoder_->channel_layout());
- uint32 requested_frames = size / bytes_per_frame;
- uint32 frames_read = renderer_->FillBuffer(
- buffer.get(), requested_frames, 0);
+ bool ConsumeBufferedData(uint32 requested_frames, bool* muted) {
+ scoped_ptr<AudioBus> bus =
+ AudioBus::Create(kChannels, std::max(requested_frames, 1u));
+ uint32 frames_read = renderer_->FillBuffer(bus.get(), requested_frames, 0);
if (muted)
- *muted = frames_read < 1 || buffer[0] == kMutedAudio;
+ *muted = frames_read < 1 || bus->channel(0)[0] == kMutedAudio;
return frames_read == requested_frames;
}
@@ -246,9 +256,7 @@ class AudioRendererImplTest : public ::testing::Test {
int total_frames_read = 0;
const int kRequestFrames = 1024;
- const uint32 bytes_per_frame = (decoder_->bits_per_channel() / 8) *
- ChannelLayoutToChannelCount(decoder_->channel_layout());
- scoped_ptr<uint8[]> buffer(new uint8[kRequestFrames * bytes_per_frame]);
+ scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kRequestFrames);
do {
TimeDelta audio_delay = TimeDelta::FromMicroseconds(
@@ -256,38 +264,38 @@ class AudioRendererImplTest : public ::testing::Test {
static_cast<float>(decoder_->samples_per_second()));
frames_read = renderer_->FillBuffer(
- buffer.get(), kRequestFrames, audio_delay.InMilliseconds());
+ bus.get(), kRequestFrames, audio_delay.InMilliseconds());
total_frames_read += frames_read;
} while (frames_read > 0);
- return total_frames_read * bytes_per_frame;
+ return total_frames_read;
}
- uint32 bytes_buffered() {
- return renderer_->algorithm_->bytes_buffered();
+ uint32 frames_buffered() {
+ return renderer_->algorithm_->frames_buffered();
}
uint32 buffer_capacity() {
return renderer_->algorithm_->QueueCapacity();
}
- uint32 bytes_remaining_in_buffer() {
+ uint32 frames_remaining_in_buffer() {
// This can happen if too much data was delivered, in which case the buffer
// will accept the data but not increase capacity.
- if (bytes_buffered() > buffer_capacity()) {
+ if (frames_buffered() > buffer_capacity()) {
return 0;
}
- return buffer_capacity() - bytes_buffered();
+ return buffer_capacity() - frames_buffered();
}
void CallResumeAfterUnderflow() {
renderer_->ResumeAfterUnderflow();
}
- TimeDelta CalculatePlayTime(int bytes_filled) {
+ TimeDelta CalculatePlayTime(int frames_filled) {
return TimeDelta::FromMicroseconds(
- bytes_filled * Time::kMicrosecondsPerSecond /
- renderer_->audio_parameters_.GetBytesPerSecond());
+ frames_filled * Time::kMicrosecondsPerSecond /
+ renderer_->audio_parameters_.sample_rate());
}
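For example, 4410 frames at a 44100 Hz sample rate yields 4410 * 1000000 / 44100 = 100000 microseconds of play time.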
void EndOfStreamTest(float playback_rate) {
@@ -297,19 +305,20 @@ class AudioRendererImplTest : public ::testing::Test {
renderer_->SetPlaybackRate(playback_rate);
// Drain internal buffer; we should have a pending read.
- int total_bytes = bytes_buffered();
- int bytes_filled = ConsumeAllBufferedData();
+ int total_frames = frames_buffered();
+ int frames_filled = ConsumeAllBufferedData();
WaitForPendingRead();
// Due to how the cross-fade algorithm works, we won't get an exact match
- // between the ideal and expected number of bytes consumed. In the faster
- // than normal playback case, more bytes are created than should exist and
+ // between the ideal and actual number of frames consumed. In the faster
+ // than normal playback case, more frames are created than should exist and
// vice versa in the slower than normal playback case.
- const float kEpsilon = 0.10 * (total_bytes / playback_rate);
- EXPECT_NEAR(bytes_filled, total_bytes / playback_rate, kEpsilon);
+ const float kEpsilon = 0.20 * (total_frames / playback_rate);
+ EXPECT_NEAR(frames_filled, total_frames / playback_rate, kEpsilon);
// Figure out how long until the ended event should fire.
- TimeDelta audio_play_time = CalculatePlayTime(bytes_filled);
+ TimeDelta audio_play_time = CalculatePlayTime(frames_filled);
+ DVLOG(1) << "audio_play_time = " << audio_play_time.InSecondsF();
// Fulfill the read with an end-of-stream packet. We shouldn't report ended
// nor have a read until we drain the internal buffer.
@@ -317,11 +326,11 @@ class AudioRendererImplTest : public ::testing::Test {
// Advance time half way without an ended expectation.
AdvanceTime(audio_play_time / 2);
- ConsumeBufferedData(bytes_buffered(), NULL);
+ ConsumeBufferedData(frames_buffered(), NULL);
// Advance time by other half and expect the ended event.
AdvanceTime(audio_play_time / 2);
- ConsumeBufferedData(bytes_buffered(), NULL);
+ ConsumeBufferedData(frames_buffered(), NULL);
WaitForEnded();
}
@@ -358,7 +367,7 @@ class AudioRendererImplTest : public ::testing::Test {
}
void DeliverBuffer(AudioDecoder::Status status,
- const scoped_refptr<DataBuffer>& buffer) {
+ const scoped_refptr<AudioBuffer>& buffer) {
CHECK(!read_cb_.is_null());
base::ResetAndReturn(&read_cb_).Run(status, buffer);
}
@@ -407,7 +416,7 @@ TEST_F(AudioRendererImplTest, Play) {
Play();
// Drain internal buffer; we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(bytes_buffered(), NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
WaitForPendingRead();
}
@@ -429,7 +438,7 @@ TEST_F(AudioRendererImplTest, Underflow) {
Play();
// Drain internal buffer; we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(bytes_buffered(), NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
WaitForPendingRead();
// Verify the next FillBuffer() call triggers the underflow callback
@@ -442,7 +451,7 @@ TEST_F(AudioRendererImplTest, Underflow) {
// Verify after resuming that we're still not getting data.
bool muted = false;
- EXPECT_EQ(0u, bytes_buffered());
+ EXPECT_EQ(0u, frames_buffered());
EXPECT_FALSE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_TRUE(muted);
@@ -460,11 +469,11 @@ TEST_F(AudioRendererImplTest, Underflow_EndOfStream) {
// Figure out how long until the ended event should fire. Since
// ConsumeBufferedData() doesn't provide audio delay information, the time
// until the ended event fires is equivalent to the longest buffered section,
- // which is the initial bytes_buffered() read.
- TimeDelta time_until_ended = CalculatePlayTime(bytes_buffered());
+ // which is the initial frames_buffered() read.
+ TimeDelta time_until_ended = CalculatePlayTime(frames_buffered());
// Drain internal buffer; we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(bytes_buffered(), NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
WaitForPendingRead();
// Verify the next FillBuffer() call triggers the underflow callback
@@ -479,13 +488,13 @@ TEST_F(AudioRendererImplTest, Underflow_EndOfStream) {
// Verify we're getting muted audio during underflow.
bool muted = false;
- EXPECT_EQ(kDataSize, bytes_buffered());
+ EXPECT_EQ(kDataSize, frames_buffered());
EXPECT_FALSE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_TRUE(muted);
// Now deliver end of stream; we should get our little bit of data back.
DeliverEndOfStream();
- EXPECT_EQ(kDataSize, bytes_buffered());
+ EXPECT_EQ(kDataSize, frames_buffered());
EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_FALSE(muted);
@@ -502,7 +511,7 @@ TEST_F(AudioRendererImplTest, Underflow_ResumeFromCallback) {
Play();
// Drain internal buffer; we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(bytes_buffered(), NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
WaitForPendingRead();
// Verify the next FillBuffer() call triggers the underflow callback
@@ -514,7 +523,7 @@ TEST_F(AudioRendererImplTest, Underflow_ResumeFromCallback) {
// Verify after resuming that we're still not getting data.
bool muted = false;
- EXPECT_EQ(0u, bytes_buffered());
+ EXPECT_EQ(0u, frames_buffered());
EXPECT_FALSE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_TRUE(muted);
@@ -547,7 +556,7 @@ TEST_F(AudioRendererImplTest, AbortPendingRead_Pause) {
Play();
// Partially drain internal buffer so we get a pending read.
- EXPECT_TRUE(ConsumeBufferedData(bytes_buffered() / 2, NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
WaitForPendingRead();
// Start pausing.
diff --git a/media/filters/decrypting_audio_decoder.cc b/media/filters/decrypting_audio_decoder.cc
index 297e03f..93cf114 100644
--- a/media/filters/decrypting_audio_decoder.cc
+++ b/media/filters/decrypting_audio_decoder.cc
@@ -11,10 +11,10 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "media/base/audio_buffer.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/bind_to_loop.h"
#include "media/base/buffers.h"
-#include "media/base/data_buffer.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decryptor.h"
#include "media/base/demuxer_stream.h"
@@ -96,7 +96,7 @@ void DecryptingAudioDecoder::Read(const ReadCB& read_cb) {
// Return empty (end-of-stream) frames if decoding has finished.
if (state_ == kDecodeFinished) {
- base::ResetAndReturn(&read_cb_).Run(kOk, DataBuffer::CreateEOSBuffer());
+ base::ResetAndReturn(&read_cb_).Run(kOk, AudioBuffer::CreateEOSBuffer());
return;
}
@@ -396,7 +396,7 @@ void DecryptingAudioDecoder::DeliverFrame(
DVLOG(2) << "DeliverFrame() - kNeedMoreData";
if (scoped_pending_buffer_to_decode->IsEndOfStream()) {
state_ = kDecodeFinished;
- base::ResetAndReturn(&read_cb_).Run(kOk, DataBuffer::CreateEOSBuffer());
+ base::ResetAndReturn(&read_cb_).Run(kOk, AudioBuffer::CreateEOSBuffer());
return;
}
@@ -454,12 +454,12 @@ void DecryptingAudioDecoder::EnqueueFrames(
queued_audio_frames_ = frames;
for (Decryptor::AudioBuffers::iterator iter = queued_audio_frames_.begin();
- iter != queued_audio_frames_.end();
- ++iter) {
- scoped_refptr<DataBuffer>& frame = *iter;
+ iter != queued_audio_frames_.end();
+ ++iter) {
+ scoped_refptr<AudioBuffer>& frame = *iter;
DCHECK(!frame->end_of_stream()) << "EOS frame returned.";
- DCHECK_GT(frame->data_size(), 0) << "Empty frame returned.";
+ DCHECK_GT(frame->frame_count(), 0) << "Empty frame returned.";
base::TimeDelta cur_timestamp = output_timestamp_base_ +
NumberOfSamplesToDuration(total_samples_decoded_);
@@ -471,11 +471,7 @@ void DecryptingAudioDecoder::EnqueueFrames(
}
frame->set_timestamp(cur_timestamp);
- int frame_size = frame->data_size();
- DCHECK_EQ(frame_size % bytes_per_sample_, 0) <<
- "Decoder didn't output full samples";
- int samples_decoded = frame_size / bytes_per_sample_;
- total_samples_decoded_ += samples_decoded;
+ total_samples_decoded_ += frame->frame_count();
base::TimeDelta next_timestamp = output_timestamp_base_ +
NumberOfSamplesToDuration(total_samples_decoded_);
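As a worked example of the new accounting: after two 1024-frame buffers at 48 kHz, total_samples_decoded_ is 2048 and the next timestamp is the base plus 2048 / 48000 s, about 42.7 ms. Tracking frames via frame_count() keeps this arithmetic independent of sample format and channel count.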
diff --git a/media/filters/decrypting_audio_decoder_unittest.cc b/media/filters/decrypting_audio_decoder_unittest.cc
index e0eea27..1a8751c 100644
--- a/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/media/filters/decrypting_audio_decoder_unittest.cc
@@ -8,8 +8,8 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/message_loop.h"
+#include "media/base/audio_buffer.h"
#include "media/base/buffers.h"
-#include "media/base/data_buffer.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/gmock_callback_support.h"
@@ -80,17 +80,22 @@ class DecryptingAudioDecoderTest : public testing::Test {
demuxer_(new StrictMock<MockDemuxerStream>(DemuxerStream::AUDIO)),
encrypted_buffer_(CreateFakeEncryptedBuffer()),
decoded_frame_(NULL),
- end_of_stream_frame_(DataBuffer::CreateEOSBuffer()),
+ end_of_stream_frame_(AudioBuffer::CreateEOSBuffer()),
decoded_frame_list_() {
- scoped_refptr<DataBuffer> data_buffer = new DataBuffer(kFakeAudioFrameSize);
- data_buffer->set_data_size(kFakeAudioFrameSize);
- // |decoded_frame_| contains random data.
- decoded_frame_ = data_buffer;
- decoded_frame_list_.push_back(decoded_frame_);
}
void InitializeAndExpectStatus(const AudioDecoderConfig& config,
PipelineStatus status) {
+ // Initialize data now that the config is known. Some tests pass invalid
+ // config values (which CreateEmptyBuffer() doesn't support), so clamp
+ // them just for CreateEmptyBuffer().
+ int channels = ChannelLayoutToChannelCount(config.channel_layout());
+ if (channels < 1)
+ channels = 1;
+ decoded_frame_ = AudioBuffer::CreateEmptyBuffer(
+ channels, kFakeAudioFrameSize, kNoTimestamp(), kNoTimestamp());
+ decoded_frame_list_.push_back(decoded_frame_);
+
demuxer_->set_audio_decoder_config(config);
decoder_->Initialize(demuxer_.get(), NewExpectedStatusCB(status),
base::Bind(&MockStatisticsCB::OnStatistics,
@@ -119,7 +124,7 @@ class DecryptingAudioDecoderTest : public testing::Test {
void ReadAndExpectFrameReadyWith(
AudioDecoder::Status status,
- const scoped_refptr<DataBuffer>& audio_frame) {
+ const scoped_refptr<AudioBuffer>& audio_frame) {
if (status != AudioDecoder::kOk)
EXPECT_CALL(*this, FrameReady(status, IsNull()));
else if (audio_frame->end_of_stream())
@@ -213,8 +218,8 @@ class DecryptingAudioDecoderTest : public testing::Test {
MOCK_METHOD1(RequestDecryptorNotification, void(const DecryptorReadyCB&));
- MOCK_METHOD2(FrameReady, void(AudioDecoder::Status,
- const scoped_refptr<DataBuffer>&));
+ MOCK_METHOD2(FrameReady,
+ void(AudioDecoder::Status, const scoped_refptr<AudioBuffer>&));
base::MessageLoop message_loop_;
scoped_ptr<DecryptingAudioDecoder> decoder_;
@@ -230,8 +235,8 @@ class DecryptingAudioDecoderTest : public testing::Test {
// Constant buffer/frames to be returned by the |demuxer_| and |decryptor_|.
scoped_refptr<DecoderBuffer> encrypted_buffer_;
- scoped_refptr<DataBuffer> decoded_frame_;
- scoped_refptr<DataBuffer> end_of_stream_frame_;
+ scoped_refptr<AudioBuffer> decoded_frame_;
+ scoped_refptr<AudioBuffer> end_of_stream_frame_;
Decryptor::AudioBuffers decoded_frame_list_;
private:
@@ -321,10 +326,16 @@ TEST_F(DecryptingAudioDecoderTest, DecryptAndDecode_NeedMoreData) {
TEST_F(DecryptingAudioDecoderTest, DecryptAndDecode_MultipleFrames) {
Initialize();
- scoped_refptr<DataBuffer> frame_a = new DataBuffer(kFakeAudioFrameSize);
- frame_a->set_data_size(kFakeAudioFrameSize);
- scoped_refptr<DataBuffer> frame_b = new DataBuffer(kFakeAudioFrameSize);
- frame_b->set_data_size(kFakeAudioFrameSize);
+ scoped_refptr<AudioBuffer> frame_a = AudioBuffer::CreateEmptyBuffer(
+ ChannelLayoutToChannelCount(config_.channel_layout()),
+ kFakeAudioFrameSize,
+ kNoTimestamp(),
+ kNoTimestamp());
+ scoped_refptr<AudioBuffer> frame_b = AudioBuffer::CreateEmptyBuffer(
+ ChannelLayoutToChannelCount(config_.channel_layout()),
+ kFakeAudioFrameSize,
+ kNoTimestamp(),
+ kNoTimestamp());
decoded_frame_list_.push_back(frame_a);
decoded_frame_list_.push_back(frame_b);
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index 7754a49..845e18f 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -8,14 +8,15 @@
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/bind_to_loop.h"
-#include "media/base/data_buffer.h"
#include "media/base/decoder_buffer.h"
#include "media/base/demuxer.h"
#include "media/base/pipeline.h"
+#include "media/base/sample_format.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
@@ -24,7 +25,7 @@ namespace media {
// Helper structure for managing multiple decoded audio frames per packet.
struct QueuedAudioBuffer {
AudioDecoder::Status status;
- scoped_refptr<DataBuffer> buffer;
+ scoped_refptr<AudioBuffer> buffer;
};
// Returns true if the decode result was end of stream.
@@ -270,10 +271,6 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
codec_context_ = avcodec_alloc_context3(NULL);
AudioDecoderConfigToAVCodecContext(config, codec_context_);
- // MP3 decodes to S16P which we don't support, tell it to use S16 instead.
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_S16P)
- codec_context_->request_sample_fmt = AV_SAMPLE_FMT_S16;
-
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) {
DLOG(ERROR) << "Could not initialize audio decoder: "
@@ -281,38 +278,19 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
return false;
}
- // Ensure avcodec_open2() respected our format request.
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_S16P) {
- DLOG(ERROR) << "Unable to configure a supported sample format: "
- << codec_context_->sample_fmt;
- return false;
- }
-
- // Some codecs will only output float data, so we need to convert to integer
- // before returning the decoded buffer.
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP ||
- codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) {
- // Preallocate the AudioBus for float conversions. We can treat interleaved
- // float data as a single planar channel since our output is expected in an
- // interleaved format anyways.
- int channels = codec_context_->channels;
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT)
- channels = 1;
- converter_bus_ = AudioBus::CreateWrapper(channels);
- }
-
// Success!
av_frame_ = avcodec_alloc_frame();
bits_per_channel_ = config.bits_per_channel();
channel_layout_ = config.channel_layout();
samples_per_second_ = config.samples_per_second();
- output_timestamp_helper_.reset(new AudioTimestampHelper(
- config.bytes_per_frame(), config.samples_per_second()));
+ output_timestamp_helper_.reset(
+ new AudioTimestampHelper(config.samples_per_second()));
bytes_per_frame_ = config.bytes_per_frame();
// Store initial values to guard against midstream configuration changes.
channels_ = codec_context_->channels;
av_sample_format_ = codec_context_->sample_fmt;
+ sample_format_ = config.sample_format();
return true;
}
@@ -424,71 +402,35 @@ void FFmpegAudioDecoder::RunDecodeLoop(
decoded_audio_size = av_samples_get_buffer_size(
NULL, codec_context_->channels, av_frame_->nb_samples,
codec_context_->sample_fmt, 1);
- // If we're decoding into float, adjust audio size.
- if (converter_bus_ && bits_per_channel_ / 8 != sizeof(float)) {
- DCHECK(codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT ||
- codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP);
- decoded_audio_size *=
- static_cast<float>(bits_per_channel_ / 8) / sizeof(float);
- }
}
- int start_sample = 0;
if (decoded_audio_size > 0 && output_bytes_to_drop_ > 0) {
DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0)
<< "Decoder didn't output full frames";
int dropped_size = std::min(decoded_audio_size, output_bytes_to_drop_);
- start_sample = dropped_size / bytes_per_frame_;
decoded_audio_size -= dropped_size;
output_bytes_to_drop_ -= dropped_size;
}
- scoped_refptr<DataBuffer> output;
+ scoped_refptr<AudioBuffer> output;
if (decoded_audio_size > 0) {
DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0)
<< "Decoder didn't output full frames";
- // Convert float data using an AudioBus.
- if (converter_bus_) {
- // Setup the AudioBus as a wrapper of the AVFrame data and then use
- // AudioBus::ToInterleaved() to convert the data as necessary.
- int skip_frames = start_sample;
- int total_frames = av_frame_->nb_samples;
- int frames_to_interleave = decoded_audio_size / bytes_per_frame_;
- if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) {
- DCHECK_EQ(converter_bus_->channels(), 1);
- total_frames *= codec_context_->channels;
- skip_frames *= codec_context_->channels;
- frames_to_interleave *= codec_context_->channels;
- }
-
- converter_bus_->set_frames(total_frames);
- for (int i = 0; i < converter_bus_->channels(); ++i) {
- converter_bus_->SetChannelData(i, reinterpret_cast<float*>(
- av_frame_->extended_data[i]));
- }
-
- output = new DataBuffer(decoded_audio_size);
- output->set_data_size(decoded_audio_size);
-
- DCHECK_EQ(frames_to_interleave, converter_bus_->frames() - skip_frames);
- converter_bus_->ToInterleavedPartial(
- skip_frames, frames_to_interleave, bits_per_channel_ / 8,
- output->writable_data());
- } else {
- output = DataBuffer::CopyFrom(
- av_frame_->extended_data[0] + start_sample * bytes_per_frame_,
- decoded_audio_size);
- }
- output->set_timestamp(output_timestamp_helper_->GetTimestamp());
- output->set_duration(
- output_timestamp_helper_->GetDuration(decoded_audio_size));
- output_timestamp_helper_->AddBytes(decoded_audio_size);
+ int decoded_frames = decoded_audio_size / bytes_per_frame_;
+ output = AudioBuffer::CopyFrom(
+ sample_format_,
+ channels_,
+ decoded_frames,
+ av_frame_->extended_data,
+ output_timestamp_helper_->GetTimestamp(),
+ output_timestamp_helper_->GetFrameDuration(decoded_frames));
+ output_timestamp_helper_->AddFrames(decoded_frames);
} else if (IsEndOfStream(result, decoded_audio_size, input) &&
!skip_eos_append) {
DCHECK_EQ(packet.size, 0);
- output = DataBuffer::CreateEOSBuffer();
+ output = AudioBuffer::CreateEOSBuffer();
}
if (output.get()) {
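The AudioBuffer::CopyFrom() call above takes an array of per-channel data pointers; planar formats supply one entry per channel, while interleaved formats (as in the Opus change below) use only the first entry. A hedged usage sketch, with interleaved_s16_data and timestamp_helper as illustrative placeholders:

uint8* channel_data[] = { interleaved_s16_data };  // interleaved: one entry
scoped_refptr<AudioBuffer> buffer = AudioBuffer::CopyFrom(
    kSampleFormatS16,
    2,      // channels
    1024,   // frames
    channel_data,
    timestamp_helper->GetTimestamp(),
    timestamp_helper->GetFrameDuration(1024));
timestamp_helper->AddFrames(1024);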
diff --git a/media/filters/ffmpeg_audio_decoder.h b/media/filters/ffmpeg_audio_decoder.h
index 7686fb2..ee770d0 100644
--- a/media/filters/ffmpeg_audio_decoder.h
+++ b/media/filters/ffmpeg_audio_decoder.h
@@ -12,6 +12,7 @@
#include "base/time/time.h"
#include "media/base/audio_decoder.h"
#include "media/base/demuxer_stream.h"
+#include "media/base/sample_format.h"
struct AVCodecContext;
struct AVFrame;
@@ -22,9 +23,7 @@ class MessageLoopProxy;
namespace media {
-class AudioBus;
class AudioTimestampHelper;
-class DataBuffer;
class DecoderBuffer;
struct QueuedAudioBuffer;
@@ -72,6 +71,7 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
// AVSampleFormat initially requested; not Chrome's SampleFormat.
int av_sample_format_;
+ SampleFormat sample_format_;
// Used for computing output timestamps.
scoped_ptr<AudioTimestampHelper> output_timestamp_helper_;
@@ -91,10 +91,6 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
// them up and hand them out as we receive Read() calls.
std::list<QueuedAudioBuffer> queued_audio_;
- // We may need to convert the audio data coming out of FFmpeg from planar
- // float to integer.
- scoped_ptr<AudioBus> converter_bus_;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(FFmpegAudioDecoder);
};
diff --git a/media/filters/ffmpeg_audio_decoder_unittest.cc b/media/filters/ffmpeg_audio_decoder_unittest.cc
index 4434caa..8957b77 100644
--- a/media/filters/ffmpeg_audio_decoder_unittest.cc
+++ b/media/filters/ffmpeg_audio_decoder_unittest.cc
@@ -7,7 +7,7 @@
#include "base/bind.h"
#include "base/message_loop.h"
#include "base/strings/stringprintf.h"
-#include "media/base/data_buffer.h"
+#include "media/base/audio_buffer.h"
#include "media/base/decoder_buffer.h"
#include "media/base/mock_filters.h"
#include "media/base/test_data_util.h"
@@ -90,7 +90,7 @@ class FFmpegAudioDecoderTest : public testing::Test {
}
void DecodeFinished(AudioDecoder::Status status,
- const scoped_refptr<DataBuffer>& buffer) {
+ const scoped_refptr<AudioBuffer>& buffer) {
decoded_audio_.push_back(buffer);
}
@@ -114,7 +114,7 @@ class FFmpegAudioDecoderTest : public testing::Test {
scoped_refptr<DecoderBuffer> vorbis_extradata_;
std::deque<scoped_refptr<DecoderBuffer> > encoded_audio_;
- std::deque<scoped_refptr<DataBuffer> > decoded_audio_;
+ std::deque<scoped_refptr<AudioBuffer> > decoded_audio_;
};
TEST_F(FFmpegAudioDecoderTest, Initialize) {
diff --git a/media/filters/opus_audio_decoder.cc b/media/filters/opus_audio_decoder.cc
index 6e5d9ed..f8afbdd 100644
--- a/media/filters/opus_audio_decoder.cc
+++ b/media/filters/opus_audio_decoder.cc
@@ -9,11 +9,11 @@
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/sys_byteorder.h"
+#include "media/base/audio_buffer.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/bind_to_loop.h"
#include "media/base/buffers.h"
-#include "media/base/data_buffer.h"
#include "media/base/decoder_buffer.h"
#include "media/base/demuxer.h"
#include "media/base/pipeline.h"
@@ -357,7 +357,7 @@ void OpusAudioDecoder::BufferReady(
// Libopus does not buffer output. Decoding is complete when an end of stream
// input buffer is received.
if (input->IsEndOfStream()) {
- base::ResetAndReturn(&read_cb_).Run(kOk, DataBuffer::CreateEOSBuffer());
+ base::ResetAndReturn(&read_cb_).Run(kOk, AudioBuffer::CreateEOSBuffer());
return;
}
@@ -383,7 +383,7 @@ void OpusAudioDecoder::BufferReady(
last_input_timestamp_ = input->GetTimestamp();
- scoped_refptr<DataBuffer> output_buffer;
+ scoped_refptr<AudioBuffer> output_buffer;
if (!Decode(input, &output_buffer)) {
base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL);
@@ -493,8 +493,8 @@ bool OpusAudioDecoder::ConfigureDecoder() {
bits_per_channel_ = config.bits_per_channel();
channel_layout_ = config.channel_layout();
samples_per_second_ = config.samples_per_second();
- output_timestamp_helper_.reset(new AudioTimestampHelper(
- config.bytes_per_frame(), config.samples_per_second()));
+ output_timestamp_helper_.reset(
+ new AudioTimestampHelper(config.samples_per_second()));
return true;
}
@@ -512,13 +512,13 @@ void OpusAudioDecoder::ResetTimestampState() {
}
bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
- scoped_refptr<DataBuffer>* output_buffer) {
- const int samples_decoded =
- opus_multistream_decode(opus_decoder_,
- input->GetData(), input->GetDataSize(),
- &output_buffer_[0],
- kMaxOpusOutputPacketSizeSamples,
- 0);
+ scoped_refptr<AudioBuffer>* output_buffer) {
+ int samples_decoded = opus_multistream_decode(opus_decoder_,
+ input->GetData(),
+ input->GetDataSize(),
+ &output_buffer_[0],
+ kMaxOpusOutputPacketSizeSamples,
+ 0);
if (samples_decoded < 0) {
LOG(ERROR) << "opus_multistream_decode failed for"
<< " timestamp: " << input->GetTimestamp().InMicroseconds()
@@ -545,16 +545,21 @@ bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
decoded_audio_data += dropped_size;
decoded_audio_size -= dropped_size;
output_bytes_to_drop_ -= dropped_size;
+ samples_decoded = decoded_audio_size /
+ demuxer_stream_->audio_decoder_config().bytes_per_frame();
}
if (decoded_audio_size > 0) {
// Copy the audio samples into an output buffer.
- *output_buffer = DataBuffer::CopyFrom(
- decoded_audio_data, decoded_audio_size);
- (*output_buffer)->set_timestamp(output_timestamp_helper_->GetTimestamp());
- (*output_buffer)->set_duration(
- output_timestamp_helper_->GetDuration(decoded_audio_size));
- output_timestamp_helper_->AddBytes(decoded_audio_size);
+ uint8* data[] = { decoded_audio_data };
+ *output_buffer = AudioBuffer::CopyFrom(
+ kSampleFormatS16,
+ ChannelLayoutToChannelCount(channel_layout_),
+ samples_decoded,
+ data,
+ output_timestamp_helper_->GetTimestamp(),
+ output_timestamp_helper_->GetFrameDuration(samples_decoded));
+ output_timestamp_helper_->AddFrames(samples_decoded);
}
// Decoding finished successfully, update statistics.
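The samples_decoded recomputation above matters once bytes are dropped: dropping 1920 bytes of stereo S16 output (4 bytes per frame) removes 480 frames, and passing the stale count to AudioBuffer::CopyFrom() would misreport the buffer's length and duration.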
diff --git a/media/filters/opus_audio_decoder.h b/media/filters/opus_audio_decoder.h
index e6f03ec..a808ff3 100644
--- a/media/filters/opus_audio_decoder.h
+++ b/media/filters/opus_audio_decoder.h
@@ -19,8 +19,8 @@ class MessageLoopProxy;
namespace media {
+class AudioBuffer;
class AudioTimestampHelper;
-class DataBuffer;
class DecoderBuffer;
struct QueuedAudioBuffer;
@@ -51,7 +51,7 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
void CloseDecoder();
void ResetTimestampState();
bool Decode(const scoped_refptr<DecoderBuffer>& input,
- scoped_refptr<DataBuffer>* output_buffer);
+ scoped_refptr<AudioBuffer>* output_buffer);
scoped_refptr<base::MessageLoopProxy> message_loop_;
base::WeakPtrFactory<OpusAudioDecoder> weak_factory_;