 media/audio/audio_util.cc          | 48
 media/audio/audio_util.h           | 13
 media/base/audio_bus.cc            | 65
 media/base/audio_bus.h             | 11
 media/base/audio_bus_unittest.cc   | 63
 media/filters/audio_file_reader.cc | 74
 media/filters/audio_file_reader.h  |  8
 webkit/glue/webkit_glue.gypi       |  1
 webkit/media/audio_decoder.cc      | 20
 9 files changed, 153 insertions(+), 150 deletions(-)
diff --git a/media/audio/audio_util.cc b/media/audio/audio_util.cc
index 19aef12..9f4cdbd 100644
--- a/media/audio/audio_util.cc
+++ b/media/audio/audio_util.cc
@@ -177,54 +177,6 @@ bool FoldChannels(void* buf,
return false;
}
-// TODO(dalecurtis): Delete once everywhere is using the AudioBus version:
-// http://crbug.com/120319.
-bool DeinterleaveAudioChannel(void* source,
- float* destination,
- int channels,
- int channel_index,
- int bytes_per_sample,
- size_t number_of_frames) {
- switch (bytes_per_sample) {
- case 1:
- {
- uint8* source8 = reinterpret_cast<uint8*>(source) + channel_index;
- const float kScale = 1.0f / 128.0f;
- for (unsigned i = 0; i < number_of_frames; ++i) {
- destination[i] = kScale * (static_cast<int>(*source8) - 128);
- source8 += channels;
- }
- return true;
- }
-
- case 2:
- {
- int16* source16 = reinterpret_cast<int16*>(source) + channel_index;
- const float kScale = 1.0f / 32768.0f;
- for (unsigned i = 0; i < number_of_frames; ++i) {
- destination[i] = kScale * *source16;
- source16 += channels;
- }
- return true;
- }
-
- case 4:
- {
- int32* source32 = reinterpret_cast<int32*>(source) + channel_index;
- const float kScale = 1.0f / 2147483648.0f;
- for (unsigned i = 0; i < number_of_frames; ++i) {
- destination[i] = kScale * *source32;
- source32 += channels;
- }
- return true;
- }
-
- default:
- break;
- }
- return false;
-}
-
// TODO(enal): use template specialization and size-specific intrinsics.
// Call is on the time-critical path, and by using SSE/AVX
// instructions we can speed things up by ~4-8x, more for the case
diff --git a/media/audio/audio_util.h b/media/audio/audio_util.h
index 9931935..d5522cc 100644
--- a/media/audio/audio_util.h
+++ b/media/audio/audio_util.h
@@ -67,19 +67,6 @@ MEDIA_EXPORT bool FoldChannels(void* buf,
int bytes_per_sample,
float volume);
-// DeinterleaveAudioChannel() takes interleaved audio buffer |source|
-// of the given |sample_fmt| and |number_of_channels| and extracts
-// |number_of_frames| data for the given |channel_index| and
-// puts it in the floating point |destination|.
-// It returns |true| on success, or |false| if the |sample_fmt| is
-// not recognized.
-MEDIA_EXPORT bool DeinterleaveAudioChannel(void* source,
- float* destination,
- int channels,
- int channel_index,
- int bytes_per_sample,
- size_t number_of_frames);
-
// Returns the default audio output hardware sample-rate.
MEDIA_EXPORT int GetAudioHardwareSampleRate();
diff --git a/media/base/audio_bus.cc b/media/base/audio_bus.cc
index a7d9ad0..e4dd01a 100644
--- a/media/base/audio_bus.cc
+++ b/media/base/audio_bus.cc
@@ -33,8 +33,8 @@ static int CalculateMemorySizeInternal(int channels, int frames,
// |Format| is the destination type, |Fixed| is a type larger than |Format|
// such that operations can be made without overflowing.
template<class Format, class Fixed>
-static void FromInterleavedInternal(const void* src, int frames,
- AudioBus* dest) {
+static void FromInterleavedInternal(const void* src, int start_frame,
+ int frames, AudioBus* dest) {
const Format* source = static_cast<const Format*>(src);
static const Fixed kBias = std::numeric_limits<Format>::is_signed ? 0 :
@@ -47,7 +47,8 @@ static void FromInterleavedInternal(const void* src, int frames,
int channels = dest->channels();
for (int ch = 0; ch < channels; ++ch) {
float* channel_data = dest->channel(ch);
- for (int i = 0, offset = ch; i < frames; ++i, offset += channels) {
+ for (int i = start_frame, offset = ch; i < start_frame + frames;
+ ++i, offset += channels) {
Fixed v = static_cast<Fixed>(source[offset]) - kBias;
channel_data[i] = v * (v < 0 ? kMinScale : kMaxScale);
}
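The template comment above is the whole conversion story: each fixed-point sample has the format's bias removed and is then scaled into roughly [-1.0, 1.0] as it is copied into the per-channel float arrays. A standalone sketch of that idea for one channel of unsigned 8-bit PCM follows; it uses the single 1/128 scale of the deleted audio_util.cc helper rather than the separate positive/negative scales FromInterleavedInternal() derives from |Format|, and the function name is purely illustrative.

    // Illustrative sketch only -- not the Chromium implementation.
    #include <cstdint>
    #include <vector>

    std::vector<float> DeinterleaveU8Channel(const uint8_t* interleaved,
                                             int channels,
                                             int channel_index,
                                             int frames) {
      // Unsigned 8-bit PCM is centered on 128, so remove that bias and scale
      // the result into roughly [-1.0, 1.0).
      const float kScale = 1.0f / 128.0f;
      std::vector<float> out(frames);
      for (int i = 0; i < frames; ++i) {
        int sample = interleaved[i * channels + channel_index];
        out[i] = kScale * (sample - 128);
      }
      return out;
    }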
@@ -87,11 +88,17 @@ static void ToInterleavedInternal(const AudioBus* source, int frames,
static void ValidateConfig(int channels, int frames) {
CHECK_GT(frames, 0);
- CHECK_LE(frames, limits::kMaxSamplesPerPacket);
CHECK_GT(channels, 0);
CHECK_LE(channels, limits::kMaxChannels);
- DCHECK_LT(limits::kMaxSamplesPerPacket * limits::kMaxChannels,
- std::numeric_limits<int>::max());
+}
+
+static void CheckOverflow(int start_frame, int frames, int total_frames) {
+ CHECK_GE(start_frame, 0);
+ CHECK_GE(frames, 0);
+ CHECK_GT(total_frames, 0);
+ int sum = start_frame + frames;
+ CHECK_LE(sum, total_frames);
+ CHECK_GE(sum, 0);
}
AudioBus::AudioBus(int channels, int frames)
@@ -160,10 +167,20 @@ scoped_ptr<AudioBus> AudioBus::WrapMemory(const AudioParameters& params,
static_cast<float*>(data)));
}
+void AudioBus::ZeroFramesPartial(int start_frame, int frames) {
+ CheckOverflow(start_frame, frames, frames_);
+
+ if (frames <= 0)
+ return;
+
+ for (size_t i = 0; i < channel_data_.size(); ++i) {
+ memset(channel_data_[i] + start_frame, 0,
+ frames * sizeof(*channel_data_[i]));
+ }
+}
+
void AudioBus::ZeroFrames(int frames) {
- DCHECK_LE(frames, frames_);
- for (size_t i = 0; i < channel_data_.size(); ++i)
- memset(channel_data_[i], 0, frames * sizeof(*channel_data_[i]));
+ ZeroFramesPartial(0, frames);
}
void AudioBus::Zero() {
@@ -186,37 +203,41 @@ void AudioBus::BuildChannelData(int channels, int aligned_frames, float* data) {
}
// TODO(dalecurtis): See if intrinsic optimizations help any here.
-void AudioBus::FromInterleaved(const void* source, int frames,
- int bytes_per_sample) {
- DCHECK_LE(frames, frames_);
+void AudioBus::FromInterleavedPartial(const void* source, int start_frame,
+ int frames, int bytes_per_sample) {
+ CheckOverflow(start_frame, frames, frames_);
switch (bytes_per_sample) {
case 1:
- FromInterleavedInternal<uint8, int16>(source, frames, this);
+ FromInterleavedInternal<uint8, int16>(source, start_frame, frames, this);
break;
case 2:
- FromInterleavedInternal<int16, int32>(source, frames, this);
+ FromInterleavedInternal<int16, int32>(source, start_frame, frames, this);
break;
case 4:
- FromInterleavedInternal<int32, int64>(source, frames, this);
+ FromInterleavedInternal<int32, int64>(source, start_frame, frames, this);
break;
default:
NOTREACHED() << "Unsupported bytes per sample encountered.";
- Zero();
+ ZeroFramesPartial(start_frame, frames);
return;
}
- // Zero any remaining frames.
- int remaining_frames = (frames_ - frames);
- if (remaining_frames) {
- for (int ch = 0; ch < channels(); ++ch)
- memset(channel(ch) + frames, 0, sizeof(*channel(ch)) * remaining_frames);
+ // Don't clear remaining frames if this is a partial deinterleave.
+ if (!start_frame) {
+ // Zero any remaining frames.
+ ZeroFramesPartial(frames, frames_ - frames);
}
}
+void AudioBus::FromInterleaved(const void* source, int frames,
+ int bytes_per_sample) {
+ FromInterleavedPartial(source, 0, frames, bytes_per_sample);
+}
+
// TODO(dalecurtis): See if intrinsic optimizations help any here.
void AudioBus::ToInterleaved(int frames, int bytes_per_sample,
void* dest) const {
- DCHECK_LE(frames, frames_);
+ CheckOverflow(0, frames, frames_);
switch (bytes_per_sample) {
case 1:
ToInterleavedInternal<uint8, int16>(this, frames, dest);
diff --git a/media/base/audio_bus.h b/media/base/audio_bus.h
index acf6491..ea7e5f8 100644
--- a/media/base/audio_bus.h
+++ b/media/base/audio_bus.h
@@ -49,10 +49,18 @@ class MEDIA_EXPORT AudioBus {
// data. Expects interleaving to be [ch0, ch1, ..., chN, ch0, ch1, ...] with
// |bytes_per_sample| per value. Values are scaled and bias corrected during
// conversion. ToInterleaved() will also clip values to format range.
- // Handles uint8, int16, and int32 currently.
+ // Handles uint8, int16, and int32 currently. FromInterleaved() will zero out
+ // any unfilled frames when |frames| is less than frames().
void FromInterleaved(const void* source, int frames, int bytes_per_sample);
void ToInterleaved(int frames, int bytes_per_sample, void* dest) const;
+  // Similar to FromInterleaved() above, but meant for streaming sources. Does
+  // not zero out remaining frames; the caller is responsible for doing so with
+  // ZeroFramesPartial(). Frames are deinterleaved from the start of |source|
+  // into channel(x) beginning at channel(x)[start_frame].
+ void FromInterleavedPartial(const void* source, int start_frame, int frames,
+ int bytes_per_sample);
+
// Helper method for copying channel data from one AudioBus to another. Both
// AudioBus objects must have the same frames() and channels().
void CopyTo(AudioBus* dest) const;
@@ -68,6 +76,7 @@ class MEDIA_EXPORT AudioBus {
// Helper method for zeroing out all channels of audio data.
void Zero();
void ZeroFrames(int frames);
+ void ZeroFramesPartial(int start_frame, int frames);
private:
friend class scoped_ptr<AudioBus>;
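Taken together, the comments above describe the intended streaming pattern: deinterleave each chunk into the bus at an increasing |start_frame|, then zero whatever was never filled. A hedged sketch of such a caller follows; DecodeNextChunk() and FillBusFromStream() are hypothetical names used only for illustration and are not part of the media API.

    // Sketch of a streaming caller, assuming a hypothetical DecodeNextChunk()
    // that returns interleaved int16 frames (or <= 0 when the stream ends).
    #include <algorithm>

    #include "base/basictypes.h"
    #include "media/base/audio_bus.h"

    int DecodeNextChunk(int16** chunk);  // Hypothetical decoder, not real API.

    void FillBusFromStream(media::AudioBus* bus) {
      int filled = 0;
      while (filled < bus->frames()) {
        int16* chunk = NULL;
        int chunk_frames = DecodeNextChunk(&chunk);
        if (chunk_frames <= 0)
          break;
        chunk_frames = std::min(chunk_frames, bus->frames() - filled);
        // Deinterleave this chunk into frames [filled, filled + chunk_frames).
        bus->FromInterleavedPartial(chunk, filled, chunk_frames, sizeof(*chunk));
        filled += chunk_frames;
      }
      // FromInterleavedPartial() never clears trailing frames; zero them here.
      bus->ZeroFramesPartial(filled, bus->frames() - filled);
    }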
diff --git a/media/base/audio_bus_unittest.cc b/media/base/audio_bus_unittest.cc
index f6c4b64..acc1907 100644
--- a/media/base/audio_bus_unittest.cc
+++ b/media/base/audio_bus_unittest.cc
@@ -14,7 +14,7 @@
static const int kChannels = 6;
static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_5_1;
// Use a buffer size which is intentionally not a multiple of kChannelAlignment.
-static const int kFrameCount = media::AudioBus::kChannelAlignment * 128 - 1;
+static const int kFrameCount = media::AudioBus::kChannelAlignment * 32 - 1;
static const int kSampleRate = 48000;
namespace media {
@@ -35,7 +35,7 @@ class AudioBusTest : public testing::Test {
void VerifyValue(const float data[], int size, float value) {
for (int i = 0; i < size; ++i)
- ASSERT_FLOAT_EQ(value, data[i]);
+ ASSERT_FLOAT_EQ(value, data[i]) << "i=" << i;
}
// Verify values for each channel in |result| against |expected|.
@@ -190,19 +190,42 @@ TEST_F(AudioBusTest, CopyTo) {
TEST_F(AudioBusTest, Zero) {
scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kFrameCount);
- // First fill the bus with dummy data.
+ // Fill the bus with dummy data.
for (int i = 0; i < bus->channels(); ++i)
std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i + 1);
- // Zero half the frames of each channel.
+  // Zero the first half of the frames of each channel.
bus->ZeroFrames(kFrameCount / 2);
- for (int i = 0; i < bus->channels(); ++i)
+ for (int i = 0; i < bus->channels(); ++i) {
+ SCOPED_TRACE("First Half Zero");
VerifyValue(bus->channel(i), kFrameCount / 2, 0);
+ VerifyValue(bus->channel(i) + kFrameCount / 2,
+ kFrameCount - kFrameCount / 2, i + 1);
+ }
+
+ // Fill the bus with dummy data.
+ for (int i = 0; i < bus->channels(); ++i)
+ std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i + 1);
+
+ // Zero the last half of the frames.
+ bus->ZeroFramesPartial(kFrameCount / 2, kFrameCount - kFrameCount / 2);
+ for (int i = 0; i < bus->channels(); ++i) {
+ SCOPED_TRACE("Last Half Zero");
+ VerifyValue(bus->channel(i) + kFrameCount / 2,
+ kFrameCount - kFrameCount / 2, 0);
+ VerifyValue(bus->channel(i), kFrameCount / 2, i + 1);
+ }
+
+ // Fill the bus with dummy data.
+ for (int i = 0; i < bus->channels(); ++i)
+ std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i + 1);
// Zero all the frames of each channel.
bus->Zero();
- for (int i = 0; i < bus->channels(); ++i)
+ for (int i = 0; i < bus->channels(); ++i) {
+ SCOPED_TRACE("All Zero");
VerifyValue(bus->channel(i), bus->frames(), 0);
+ }
}
// Each test vector represents two channels of data in the following arbitrary
@@ -233,24 +256,52 @@ TEST_F(AudioBusTest, FromInterleaved) {
}
{
SCOPED_TRACE("uint8");
+ bus->Zero();
bus->FromInterleaved(
kTestVectorUint8, kTestVectorFrames, sizeof(*kTestVectorUint8));
VerifyBus(bus.get(), expected.get());
}
{
SCOPED_TRACE("int16");
+ bus->Zero();
bus->FromInterleaved(
kTestVectorInt16, kTestVectorFrames, sizeof(*kTestVectorInt16));
VerifyBus(bus.get(), expected.get());
}
{
SCOPED_TRACE("int32");
+ bus->Zero();
bus->FromInterleaved(
kTestVectorInt32, kTestVectorFrames, sizeof(*kTestVectorInt32));
VerifyBus(bus.get(), expected.get());
}
}
+// Verify FromInterleavedPartial() deinterleaves audio correctly.
+TEST_F(AudioBusTest, FromInterleavedPartial) {
+ // Only deinterleave the middle two frames in each channel.
+ static const int kPartialStart = 1;
+ static const int kPartialFrames = 2;
+ ASSERT_LE(kPartialStart + kPartialFrames, kTestVectorFrames);
+
+ scoped_ptr<AudioBus> bus = AudioBus::Create(
+ kTestVectorChannels, kTestVectorFrames);
+ scoped_ptr<AudioBus> expected = AudioBus::Create(
+ kTestVectorChannels, kTestVectorFrames);
+ expected->Zero();
+ for (int ch = 0; ch < kTestVectorChannels; ++ch) {
+ memcpy(expected->channel(ch) + kPartialStart,
+ kTestVectorResult[ch] + kPartialStart,
+ kPartialFrames * sizeof(*expected->channel(ch)));
+ }
+
+ bus->Zero();
+ bus->FromInterleavedPartial(
+ kTestVectorInt32 + kPartialStart * bus->channels(), kPartialStart,
+ kPartialFrames, sizeof(*kTestVectorInt32));
+ VerifyBus(bus.get(), expected.get());
+}
+
// Verify ToInterleaved() interleaves audio in supported formats correctly.
TEST_F(AudioBusTest, ToInterleaved) {
scoped_ptr<AudioBus> bus = AudioBus::Create(
diff --git a/media/filters/audio_file_reader.cc b/media/filters/audio_file_reader.cc
index ccfafa1..8dfab47 100644
--- a/media/filters/audio_file_reader.cc
+++ b/media/filters/audio_file_reader.cc
@@ -8,7 +8,7 @@
#include "base/basictypes.h"
#include "base/string_util.h"
#include "base/time.h"
-#include "media/audio/audio_util.h"
+#include "media/base/audio_bus.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
@@ -17,8 +17,7 @@ namespace media {
AudioFileReader::AudioFileReader(FFmpegURLProtocol* protocol)
: protocol_(protocol),
format_context_(NULL),
- codec_context_(NULL),
- codec_(NULL) {
+ codec_context_(NULL) {
}
AudioFileReader::~AudioFileReader() {
@@ -81,9 +80,9 @@ bool AudioFileReader::Open() {
return false;
avformat_find_stream_info(format_context_, NULL);
- codec_ = avcodec_find_decoder(codec_context_->codec_id);
- if (codec_) {
- if ((result = avcodec_open2(codec_context_, codec_, NULL)) < 0) {
+ AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
+ if (codec) {
+ if ((result = avcodec_open2(codec_context_, codec, NULL)) < 0) {
DLOG(WARNING) << "AudioFileReader::Open() : could not open codec -"
<< " result: " << result;
return false;
@@ -98,11 +97,10 @@ bool AudioFileReader::Open() {
}
void AudioFileReader::Close() {
- if (codec_context_ && codec_)
+ if (codec_context_) {
avcodec_close(codec_context_);
-
- codec_context_ = NULL;
- codec_ = NULL;
+ codec_context_ = NULL;
+ }
if (format_context_) {
avformat_close_input(&format_context_);
@@ -110,25 +108,25 @@ void AudioFileReader::Close() {
}
}
-bool AudioFileReader::Read(const std::vector<float*>& audio_data,
- size_t number_of_frames) {
- DCHECK(format_context_ && codec_context_ && codec_) <<
+bool AudioFileReader::Read(AudioBus* audio_bus) {
+ DCHECK(format_context_ && codec_context_) <<
"AudioFileReader::Read() : reader is not opened!";
- size_t channels = this->channels();
- DCHECK_EQ(audio_data.size(), channels);
- if (audio_data.size() != channels)
+ DCHECK_EQ(audio_bus->channels(), channels());
+ if (audio_bus->channels() != channels())
return false;
+ size_t bytes_per_sample = av_get_bytes_per_sample(codec_context_->sample_fmt);
+
// Holds decoded audio.
scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> av_frame(avcodec_alloc_frame());
// Read until we hit EOF or we've read the requested number of frames.
AVPacket packet;
int result = 0;
- size_t current_frame = 0;
+ int current_frame = 0;
- while (current_frame < number_of_frames &&
+ while (current_frame < audio_bus->frames() &&
(result = av_read_frame(format_context_, &packet)) >= 0) {
avcodec_get_frame_defaults(av_frame.get());
int frame_decoded = 0;
@@ -140,45 +138,35 @@ bool AudioFileReader::Read(const std::vector<float*>& audio_data,
DLOG(WARNING)
<< "AudioFileReader::Read() : error in avcodec_decode_audio3() -"
<< result;
-
- // Fail if nothing has been decoded, otherwise return partial data.
- return current_frame > 0;
+ break;
}
if (!frame_decoded)
continue;
- // Determine the number of sample-frames we just decoded.
- size_t bytes_per_sample =
- av_get_bytes_per_sample(codec_context_->sample_fmt);
- size_t frames_read = av_frame->nb_samples;
+ // Determine the number of sample-frames we just decoded. Check overflow.
+ int frames_read = av_frame->nb_samples;
+ if (frames_read < 0)
+ break;
// Truncate, if necessary, if the destination isn't big enough.
- if (current_frame + frames_read > number_of_frames)
- frames_read = number_of_frames - current_frame;
+ if (current_frame + frames_read > audio_bus->frames())
+ frames_read = audio_bus->frames() - current_frame;
// Deinterleave each channel and convert to 32bit floating-point
// with nominal range -1.0 -> +1.0.
- for (size_t channel_index = 0; channel_index < channels;
- ++channel_index) {
- if (!DeinterleaveAudioChannel(av_frame->data[0],
- audio_data[channel_index] + current_frame,
- channels,
- channel_index,
- bytes_per_sample,
- frames_read)) {
- DLOG(WARNING)
- << "AudioFileReader::Read() : Unsupported sample format : "
- << codec_context_->sample_fmt
- << " codec_->id : " << codec_->id;
- return false;
- }
- }
+ audio_bus->FromInterleavedPartial(
+ av_frame->data[0], current_frame, frames_read, bytes_per_sample);
current_frame += frames_read;
}
- return true;
+ // Zero any remaining frames.
+ audio_bus->ZeroFramesPartial(
+ current_frame, audio_bus->frames() - current_frame);
+
+  // Fail if nothing has been decoded; otherwise return partial data.
+ return current_frame > 0;
}
} // namespace media
diff --git a/media/filters/audio_file_reader.h b/media/filters/audio_file_reader.h
index 6a3fd89..ee5a427 100644
--- a/media/filters/audio_file_reader.h
+++ b/media/filters/audio_file_reader.h
@@ -8,7 +8,6 @@
#include <vector>
#include "media/filters/ffmpeg_glue.h"
-struct AVCodec;
struct AVCodecContext;
struct AVFormatContext;
@@ -16,6 +15,7 @@ namespace base { class TimeDelta; }
namespace media {
+class AudioBus;
class FFmpegURLProtocol;
class MEDIA_EXPORT AudioFileReader {
@@ -32,12 +32,13 @@ class MEDIA_EXPORT AudioFileReader {
bool Open();
void Close();
- // After a call to Open(), reads |number_of_frames| into |audio_data|.
+ // After a call to Open(), attempts to fully fill |audio_bus| with decoded
+ // audio data. Any unfilled frames will be zeroed out.
-  // |audio_data| must be of the same size as channels().
+  // |audio_bus| must have the same number of channels as channels().
// The audio data will be decoded as floating-point linear PCM with
// a nominal range of -1.0 -> +1.0.
// Returns |true| on success.
- bool Read(const std::vector<float*>& audio_data, size_t number_of_frames);
+ bool Read(AudioBus* audio_bus);
// These methods can be called once Open() has been called.
int channels() const;
@@ -49,7 +50,6 @@ class MEDIA_EXPORT AudioFileReader {
FFmpegURLProtocol* protocol_;
AVFormatContext* format_context_;
AVCodecContext* codec_context_;
- AVCodec* codec_;
DISALLOW_COPY_AND_ASSIGN(AudioFileReader);
};
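For reference, a sketch of the updated calling convention: the caller now creates and sizes an AudioBus itself and hands it to Read(), as webkit/media/audio_decoder.cc does below. DecodeToBus() and its fixed frame count are illustrative assumptions, not part of the media API.

    // Sketch only: decode a file into a caller-owned AudioBus.
    #include "base/memory/scoped_ptr.h"
    #include "media/base/audio_bus.h"
    #include "media/filters/audio_file_reader.h"

    bool DecodeToBus(media::FFmpegURLProtocol* protocol, int frames) {
      media::AudioFileReader reader(protocol);
      if (!reader.Open())
        return false;
      scoped_ptr<media::AudioBus> bus =
          media::AudioBus::Create(reader.channels(), frames);
      // Read() fills as much of |bus| as it can, zeroes any leftover frames,
      // and returns true if at least one frame was decoded.
      bool ok = reader.Read(bus.get());
      reader.Close();
      return ok;
    }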
diff --git a/webkit/glue/webkit_glue.gypi b/webkit/glue/webkit_glue.gypi
index ecb6e8f..6c7a448 100644
--- a/webkit/glue/webkit_glue.gypi
+++ b/webkit/glue/webkit_glue.gypi
@@ -128,6 +128,7 @@
'<(DEPTH)/gpu/gpu.gyp:gles2_c_lib',
'<(DEPTH)/gpu/gpu.gyp:gles2_implementation',
'<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/media/media.gyp:shared_memory_support',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/ppapi/ppapi.gyp:ppapi_c',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_shared',
diff --git a/webkit/media/audio_decoder.cc b/webkit/media/audio_decoder.cc
index 589184a..b9e758e 100644
--- a/webkit/media/audio_decoder.cc
+++ b/webkit/media/audio_decoder.cc
@@ -8,11 +8,13 @@
#include "base/basictypes.h"
#include "base/string_util.h"
#include "base/time.h"
+#include "media/base/audio_bus.h"
#include "media/base/limits.h"
#include "media/filters/audio_file_reader.h"
#include "media/filters/in_memory_url_protocol.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebAudioBus.h"
+using media::AudioBus;
using media::AudioFileReader;
using media::InMemoryUrlProtocol;
using std::vector;
@@ -49,13 +51,6 @@ bool DecodeAudioFileData(
file_sample_rate > media::limits::kMaxSampleRate)
return false;
- // TODO(crogers) : do sample-rate conversion with FFmpeg.
- // For now, we're ignoring the requested 'sample_rate' and returning
- // the WebAudioBus at the file's sample-rate.
- // double destination_sample_rate =
- // (sample_rate != 0.0) ? sample_rate : file_sample_rate;
- double destination_sample_rate = file_sample_rate;
-
DVLOG(1) << "Decoding file data -"
<< " data: " << data
<< " data size: " << data_size
@@ -64,14 +59,10 @@ bool DecodeAudioFileData(
<< " sample rate: " << file_sample_rate
<< " number of channels: " << number_of_channels;
- // Change to destination sample-rate.
- number_of_frames = static_cast<size_t>(number_of_frames *
- (destination_sample_rate / file_sample_rate));
-
// Allocate and configure the output audio channel data.
destination_bus->initialize(number_of_channels,
number_of_frames,
- destination_sample_rate);
+ file_sample_rate);
// Wrap the channel pointers which will receive the decoded PCM audio.
vector<float*> audio_data;
@@ -80,8 +71,11 @@ bool DecodeAudioFileData(
audio_data.push_back(destination_bus->channelData(i));
}
+ scoped_ptr<AudioBus> audio_bus = AudioBus::WrapVector(
+ number_of_frames, audio_data);
+
// Decode the audio file data.
- return reader.Read(audio_data, number_of_frames);
+ return reader.Read(audio_bus.get());
}
} // namespace webkit_media