path: root/media
author     vrk@google.com <vrk@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2012-03-21 20:55:23 +0000
committer  vrk@google.com <vrk@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2012-03-21 20:55:23 +0000
commit     cfb09c2d58ecb16b14192619695ea54570344ca7 (patch)
tree       4d55c178533d474429eef44382fdc93c9fa0e28f /media
parent     ef12d1e6acb871dbd01f330d0b10ada24d209371 (diff)
download   chromium_src-cfb09c2d58ecb16b14192619695ea54570344ca7.zip
           chromium_src-cfb09c2d58ecb16b14192619695ea54570344ca7.tar.gz
           chromium_src-cfb09c2d58ecb16b14192619695ea54570344ca7.tar.bz2
Make AudioParameters a class instead of a struct
Also collapses some long parameter lists into AudioParameters and moves some of the hardcoded values (e.g. 16 bit audio in AudioDevice) to more appropriate locations.

BUG=115902
TEST=manually testing everything works out
Review URL: https://chromiumcodereview.appspot.com/9655018

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@128054 0039d316-1c4b-4281-b951-d872f2087c98
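Before the diff itself, the shape of the new API in one place: callers move from direct member access (params.format, params.channels, ...) to accessor methods, and GetPacketSize() is renamed GetBytesPerBuffer(). The snippet below is an illustrative sketch of the post-commit call pattern, not code from this change; the helper function and its logging are placeholders.

  #include "base/logging.h"
  #include "media/audio/audio_parameters.h"

  // Hypothetical caller showing the accessor-based API introduced here.
  void DescribeStream(const AudioParameters& params) {
    if (!params.IsValid())
      return;
    // channels() is derived from channel_layout() inside the class.
    DVLOG(1) << "format=" << params.format()
             << " channels=" << params.channels()
             << " sample_rate=" << params.sample_rate()
             << " bytes/frame=" << params.GetBytesPerFrame()
             << " bytes/buffer=" << params.GetBytesPerBuffer();  // was GetPacketSize()
  }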
Diffstat (limited to 'media')
-rw-r--r--  media/audio/android/audio_manager_android.cc        8
-rw-r--r--  media/audio/audio_input_controller.cc                4
-rw-r--r--  media/audio/audio_input_unittest.cc                  2
-rw-r--r--  media/audio/audio_input_volume_unittest.cc           3
-rw-r--r--  media/audio/audio_manager_base.cc                    14
-rw-r--r--  media/audio/audio_output_dispatcher.cc               4
-rw-r--r--  media/audio/audio_parameters.cc                      82
-rw-r--r--  media/audio/audio_parameters.h                       42
-rw-r--r--  media/audio/audio_parameters_unittest.cc             40
-rw-r--r--  media/audio/audio_util.cc                            26
-rw-r--r--  media/audio/audio_util.h                             11
-rw-r--r--  media/audio/fake_audio_input_stream.cc               6
-rw-r--r--  media/audio/fake_audio_output_stream.cc              10
-rw-r--r--  media/audio/fake_audio_output_stream.h               2
-rw-r--r--  media/audio/linux/alsa_input.cc                      67
-rw-r--r--  media/audio/linux/alsa_input.h                       8
-rw-r--r--  media/audio/linux/alsa_output.cc                     49
-rw-r--r--  media/audio/linux/alsa_output_unittest.cc            14
-rw-r--r--  media/audio/linux/audio_manager_linux.cc             8
-rw-r--r--  media/audio/mac/audio_input_mac.cc                   10
-rw-r--r--  media/audio/mac/audio_low_latency_input_mac.cc       16
-rw-r--r--  media/audio/mac/audio_low_latency_input_mac.h        2
-rw-r--r--  media/audio/mac/audio_low_latency_output_mac.cc      14
-rw-r--r--  media/audio/mac/audio_low_latency_output_mac.h       2
-rw-r--r--  media/audio/mac/audio_manager_mac.cc                 8
-rw-r--r--  media/audio/mac/audio_output_mac.cc                  18
-rw-r--r--  media/audio/pulse/pulse_output.h                     4
-rw-r--r--  media/audio/simple_sources_unittest.cc               2
-rw-r--r--  media/audio/win/audio_low_latency_input_win.cc       18
-rw-r--r--  media/audio/win/audio_low_latency_input_win.h        2
-rw-r--r--  media/audio/win/audio_low_latency_output_win.cc      18
-rw-r--r--  media/audio/win/audio_low_latency_output_win.h       4
-rw-r--r--  media/audio/win/audio_manager_win.cc                 12
-rw-r--r--  media/audio/win/wavein_input_win.cc                  10
-rw-r--r--  media/audio/win/waveout_output_win.cc                16
-rw-r--r--  media/base/audio_renderer_sink.h                     5
-rw-r--r--  media/base/channel_layout.h                          18
-rw-r--r--  media/ffmpeg/ffmpeg_common.cc                        16
38 files changed, 310 insertions, 285 deletions
diff --git a/media/audio/android/audio_manager_android.cc b/media/audio/android/audio_manager_android.cc
index 81264b5..f34c2c4 100644
--- a/media/audio/android/audio_manager_android.cc
+++ b/media/audio/android/audio_manager_android.cc
@@ -42,24 +42,24 @@ void AudioManagerAndroid::UnMuteAll() {
AudioOutputStream* AudioManagerAndroid::MakeLinearOutputStream(
const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
return AudioTrackOutputStream::MakeStream(params);
}
AudioOutputStream* AudioManagerAndroid::MakeLowLatencyOutputStream(
const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
return AudioTrackOutputStream::MakeStream(params);
}
AudioInputStream* AudioManagerAndroid::MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
return FakeAudioInputStream::MakeFakeStream(this, params);
}
AudioInputStream* AudioManagerAndroid::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
return FakeAudioInputStream::MakeFakeStream(this, params);
}
diff --git a/media/audio/audio_input_controller.cc b/media/audio/audio_input_controller.cc
index 65095c3..27903fa 100644
--- a/media/audio/audio_input_controller.cc
+++ b/media/audio/audio_input_controller.cc
@@ -43,7 +43,7 @@ scoped_refptr<AudioInputController> AudioInputController::Create(
const AudioParameters& params) {
DCHECK(audio_manager);
- if (!params.IsValid() || (params.channels > kMaxInputChannels))
+ if (!params.IsValid() || (params.channels() > kMaxInputChannels))
return NULL;
if (factory_)
@@ -76,7 +76,7 @@ scoped_refptr<AudioInputController> AudioInputController::CreateLowLatency(
DCHECK(audio_manager);
DCHECK(sync_writer);
- if (!params.IsValid() || (params.channels > kMaxInputChannels))
+ if (!params.IsValid() || (params.channels() > kMaxInputChannels))
return NULL;
// Create the AudioInputController object and ensure that it runs on
diff --git a/media/audio/audio_input_unittest.cc b/media/audio/audio_input_unittest.cc
index 35efcb8..74effda 100644
--- a/media/audio/audio_input_unittest.cc
+++ b/media/audio/audio_input_unittest.cc
@@ -90,7 +90,7 @@ TEST(AudioInputTest, SanityOnMakeParams) {
AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_7POINT1, 8000, 16,
+ AudioParameters(fmt, CHANNEL_LAYOUT_7_1, 8000, 16,
kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16,
diff --git a/media/audio/audio_input_volume_unittest.cc b/media/audio/audio_input_volume_unittest.cc
index febdb78..99f4e83 100644
--- a/media/audio/audio_input_volume_unittest.cc
+++ b/media/audio/audio_input_volume_unittest.cc
@@ -46,8 +46,7 @@ class AudioInputVolumeTest : public ::testing::Test {
AudioInputStream* CreateAndOpenStream(const std::string& device_id) {
AudioParameters::Format format = AudioParameters::AUDIO_PCM_LOW_LATENCY;
ChannelLayout channel_layout =
- (media::GetAudioInputHardwareChannelCount(device_id) == 1) ?
- CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+ media::GetAudioInputHardwareChannelLayout(device_id);
int bits_per_sample = 16;
int sample_rate =
static_cast<int>(media::GetAudioInputHardwareSampleRate(device_id));
diff --git a/media/audio/audio_manager_base.cc b/media/audio/audio_manager_base.cc
index abe31dc..89191af 100644
--- a/media/audio/audio_manager_base.cc
+++ b/media/audio/audio_manager_base.cc
@@ -84,11 +84,11 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
}
AudioOutputStream* stream = NULL;
- if (params.format == AudioParameters::AUDIO_MOCK) {
+ if (params.format() == AudioParameters::AUDIO_MOCK) {
stream = FakeAudioOutputStream::MakeFakeStream(this, params);
- } else if (params.format == AudioParameters::AUDIO_PCM_LINEAR) {
+ } else if (params.format() == AudioParameters::AUDIO_PCM_LINEAR) {
stream = MakeLinearOutputStream(params);
- } else if (params.format == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
+ } else if (params.format() == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
stream = MakeLowLatencyOutputStream(params);
}
@@ -100,7 +100,7 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
AudioInputStream* AudioManagerBase::MakeAudioInputStream(
const AudioParameters& params, const std::string& device_id) {
- if (!params.IsValid() || (params.channels > kMaxInputChannels) ||
+ if (!params.IsValid() || (params.channels() > kMaxInputChannels) ||
device_id.empty()) {
DLOG(ERROR) << "Audio parameters are invalid for device " << device_id;
return NULL;
@@ -114,11 +114,11 @@ AudioInputStream* AudioManagerBase::MakeAudioInputStream(
}
AudioInputStream* stream = NULL;
- if (params.format == AudioParameters::AUDIO_MOCK) {
+ if (params.format() == AudioParameters::AUDIO_MOCK) {
stream = FakeAudioInputStream::MakeFakeStream(this, params);
- } else if (params.format == AudioParameters::AUDIO_PCM_LINEAR) {
+ } else if (params.format() == AudioParameters::AUDIO_PCM_LINEAR) {
stream = MakeLinearInputStream(params, device_id);
- } else if (params.format == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
+ } else if (params.format() == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
stream = MakeLowLatencyInputStream(params, device_id);
}
diff --git a/media/audio/audio_output_dispatcher.cc b/media/audio/audio_output_dispatcher.cc
index b1f265b..1ac7e8a 100644
--- a/media/audio/audio_output_dispatcher.cc
+++ b/media/audio/audio_output_dispatcher.cc
@@ -17,8 +17,8 @@ AudioOutputDispatcher::AudioOutputDispatcher(
message_loop_(MessageLoop::current()),
params_(params),
pause_delay_(base::TimeDelta::FromMilliseconds(
- 2 * params.samples_per_packet *
- base::Time::kMillisecondsPerSecond / params.sample_rate)),
+ 2 * params.frames_per_buffer() *
+ base::Time::kMillisecondsPerSecond / params.sample_rate())),
paused_proxies_(0),
ALLOW_THIS_IN_INITIALIZER_LIST(weak_this_(this)),
close_timer_(FROM_HERE,
diff --git a/media/audio/audio_parameters.cc b/media/audio/audio_parameters.cc
index 4395481..852bd12 100644
--- a/media/audio/audio_parameters.cc
+++ b/media/audio/audio_parameters.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,61 +7,77 @@
#include "media/base/limits.h"
AudioParameters::AudioParameters()
- : format(AUDIO_PCM_LINEAR),
- channel_layout(CHANNEL_LAYOUT_NONE),
- sample_rate(0),
- bits_per_sample(0),
- samples_per_packet(0),
- channels(0) {
+ : format_(AUDIO_PCM_LINEAR),
+ channel_layout_(CHANNEL_LAYOUT_NONE),
+ sample_rate_(0),
+ bits_per_sample_(0),
+ frames_per_buffer_(0),
+ channels_(0) {
}
AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
int sample_rate, int bits_per_sample,
- int samples_per_packet)
- : format(format),
- channel_layout(channel_layout),
- sample_rate(sample_rate),
- bits_per_sample(bits_per_sample),
- samples_per_packet(samples_per_packet),
- channels(ChannelLayoutToChannelCount(channel_layout)) {
+ int frames_per_buffer)
+ : format_(format),
+ channel_layout_(channel_layout),
+ sample_rate_(sample_rate),
+ bits_per_sample_(bits_per_sample),
+ frames_per_buffer_(frames_per_buffer),
+ channels_(ChannelLayoutToChannelCount(channel_layout)) {
+}
+
+void AudioParameters::Reset(Format format, ChannelLayout channel_layout,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer) {
+ format_ = format;
+ channel_layout_ = channel_layout;
+ sample_rate_ = sample_rate;
+ bits_per_sample_ = bits_per_sample;
+ frames_per_buffer_ = frames_per_buffer;
+ channels_ = ChannelLayoutToChannelCount(channel_layout);
}
bool AudioParameters::IsValid() const {
- return (format >= 0) && (format < AUDIO_LAST_FORMAT) &&
- (channels > 0) && (channels <= media::limits::kMaxChannels) &&
- (sample_rate > 0) && (sample_rate <= media::limits::kMaxSampleRate) &&
- (bits_per_sample > 0) &&
- (bits_per_sample <= media::limits::kMaxBitsPerSample) &&
- (samples_per_packet > 0) &&
- (samples_per_packet <= media::limits::kMaxSamplesPerPacket);
+ return (format_ >= 0) && (format_ < AUDIO_LAST_FORMAT) &&
+ (channels_ > 0) && (channels_ <= media::limits::kMaxChannels) &&
+ (sample_rate_ > 0) &&
+ (sample_rate_ <= media::limits::kMaxSampleRate) &&
+ (bits_per_sample_ > 0) &&
+ (bits_per_sample_ <= media::limits::kMaxBitsPerSample) &&
+ (frames_per_buffer_ > 0) &&
+ (frames_per_buffer_ <= media::limits::kMaxSamplesPerPacket);
}
-int AudioParameters::GetPacketSize() const {
- return samples_per_packet * channels * bits_per_sample / 8;
+int AudioParameters::GetBytesPerBuffer() const {
+ return frames_per_buffer_ * GetBytesPerFrame();
}
int AudioParameters::GetBytesPerSecond() const {
- return sample_rate * channels * bits_per_sample / 8;
+ return sample_rate_ * GetBytesPerFrame();
+}
+
+int AudioParameters::GetBytesPerFrame() const {
+ return channels_ * bits_per_sample_ / 8;
}
bool AudioParameters::Compare::operator()(
const AudioParameters& a,
const AudioParameters& b) const {
- if (a.format < b.format)
+ if (a.format_ < b.format_)
return true;
- if (a.format > b.format)
+ if (a.format_ > b.format_)
return false;
- if (a.channels < b.channels)
+ if (a.channels_ < b.channels_)
return true;
- if (a.channels > b.channels)
+ if (a.channels_ > b.channels_)
return false;
- if (a.sample_rate < b.sample_rate)
+ if (a.sample_rate_ < b.sample_rate_)
return true;
- if (a.sample_rate > b.sample_rate)
+ if (a.sample_rate_ > b.sample_rate_)
return false;
- if (a.bits_per_sample < b.bits_per_sample)
+ if (a.bits_per_sample_ < b.bits_per_sample_)
return true;
- if (a.bits_per_sample > b.bits_per_sample)
+ if (a.bits_per_sample_ > b.bits_per_sample_)
return false;
- return a.samples_per_packet < b.samples_per_packet;
+ return a.frames_per_buffer_ < b.frames_per_buffer_;
}
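A quick worked check of the byte arithmetic introduced above (a sketch, not part of the commit; the values are chosen to match the GetBytesPerBuffer unit test later in this diff): a stereo, 16-bit stream at 1000 Hz with 200 frames per buffer yields 2 * 16 / 8 = 4 bytes per frame, 200 * 4 = 800 bytes per buffer, and 1000 * 4 = 4000 bytes per second.

  AudioParameters p(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
                    1000 /* sample_rate */, 16 /* bits_per_sample */,
                    200 /* frames_per_buffer */);
  DCHECK_EQ(4, p.GetBytesPerFrame());      // 2 channels * 16 bits / 8
  DCHECK_EQ(800, p.GetBytesPerBuffer());   // 200 frames * 4 bytes
  DCHECK_EQ(4000, p.GetBytesPerSecond());  // 1000 frames/s * 4 bytes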
diff --git a/media/audio/audio_parameters.h b/media/audio/audio_parameters.h
index 244d8b9..6b3ed5c 100644
--- a/media/audio/audio_parameters.h
+++ b/media/audio/audio_parameters.h
@@ -9,9 +9,8 @@
#include "media/base/channel_layout.h"
#include "media/base/media_export.h"
-// TODO(vrk): This should probably be changed to an immutable object instead of
-// a struct. See crbug.com/115902.
-struct MEDIA_EXPORT AudioParameters {
+class MEDIA_EXPORT AudioParameters {
+ public:
// Compare is useful when AudioParameters is used as a key in std::map.
class MEDIA_EXPORT Compare {
public:
@@ -33,27 +32,42 @@ struct MEDIA_EXPORT AudioParameters {
static const uint32 kAudioDATSampleRate = 48000;
AudioParameters();
- AudioParameters(Format format, ChannelLayout channel_layout, int sample_rate,
- int bits_per_sample, int samples_per_packet);
+ AudioParameters(Format format, ChannelLayout channel_layout,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer);
+ void Reset(Format format, ChannelLayout channel_layout,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer);
// Checks that all values are in the expected range. All limits are specified
// in media::Limits.
bool IsValid() const;
- // Returns size of audio packets in bytes.
- int GetPacketSize() const;
+ // Returns size of audio buffer in bytes.
+ int GetBytesPerBuffer() const;
// Returns the number of bytes representing one second of audio.
int GetBytesPerSecond() const;
- Format format; // Format of the stream.
- ChannelLayout channel_layout; // Order of surround sound channels.
- int sample_rate; // Sampling frequency/rate.
- int bits_per_sample; // Number of bits per sample.
- int samples_per_packet; // Size of a packet in frames.
+ // Returns the number of bytes representing a frame of audio.
+ int GetBytesPerFrame() const;
- int channels; // Number of channels. Value set based on
- // |channel_layout|.
+ Format format() const { return format_; }
+ ChannelLayout channel_layout() const { return channel_layout_; }
+ int sample_rate() const { return sample_rate_; }
+ int bits_per_sample() const { return bits_per_sample_; }
+ int frames_per_buffer() const { return frames_per_buffer_; }
+ int channels() const { return channels_; }
+
+ private:
+ Format format_; // Format of the stream.
+ ChannelLayout channel_layout_; // Order of surround sound channels.
+ int sample_rate_; // Sampling frequency/rate.
+ int bits_per_sample_; // Number of bits per sample.
+ int frames_per_buffer_; // Number of frames in a buffer.
+
+ int channels_; // Number of channels. Value set based on
+ // |channel_layout|.
};
#endif // MEDIA_AUDIO_AUDIO_PARAMETERS_H_
diff --git a/media/audio/audio_parameters_unittest.cc b/media/audio/audio_parameters_unittest.cc
index da3e746..bd3334f 100644
--- a/media/audio/audio_parameters_unittest.cc
+++ b/media/audio/audio_parameters_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -17,12 +17,12 @@ TEST(AudioParameters, Constructor_Default) {
AudioParameters params;
- EXPECT_EQ(expected_format, params.format);
- EXPECT_EQ(expected_bits, params.bits_per_sample);
- EXPECT_EQ(expected_channels, params.channels);
- EXPECT_EQ(expected_channel_layout, params.channel_layout);
- EXPECT_EQ(expected_rate, params.sample_rate);
- EXPECT_EQ(expected_samples, params.samples_per_packet);
+ EXPECT_EQ(expected_format, params.format());
+ EXPECT_EQ(expected_bits, params.bits_per_sample());
+ EXPECT_EQ(expected_channels, params.channels());
+ EXPECT_EQ(expected_channel_layout, params.channel_layout());
+ EXPECT_EQ(expected_rate, params.sample_rate());
+ EXPECT_EQ(expected_samples, params.frames_per_buffer());
}
TEST(AudioParameters, Constructor_ParameterValues) {
@@ -30,37 +30,37 @@ TEST(AudioParameters, Constructor_ParameterValues) {
AudioParameters::AUDIO_PCM_LOW_LATENCY;
int expected_bits = 16;
int expected_channels = 6;
- ChannelLayout expected_channel_layout = CHANNEL_LAYOUT_5POINT1;
+ ChannelLayout expected_channel_layout = CHANNEL_LAYOUT_5_1;
int expected_rate = 44100;
int expected_samples = 880;
AudioParameters params(expected_format, expected_channel_layout,
expected_rate, expected_bits, expected_samples);
- EXPECT_EQ(expected_format, params.format);
- EXPECT_EQ(expected_bits, params.bits_per_sample);
- EXPECT_EQ(expected_channels, params.channels);
- EXPECT_EQ(expected_channel_layout, params.channel_layout);
- EXPECT_EQ(expected_rate, params.sample_rate);
- EXPECT_EQ(expected_samples, params.samples_per_packet);
+ EXPECT_EQ(expected_format, params.format());
+ EXPECT_EQ(expected_bits, params.bits_per_sample());
+ EXPECT_EQ(expected_channels, params.channels());
+ EXPECT_EQ(expected_channel_layout, params.channel_layout());
+ EXPECT_EQ(expected_rate, params.sample_rate());
+ EXPECT_EQ(expected_samples, params.frames_per_buffer());
}
-TEST(AudioParameters, GetPacketSize) {
+TEST(AudioParameters, GetBytesPerBuffer) {
EXPECT_EQ(100, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
CHANNEL_LAYOUT_MONO, 1000, 8, 100)
- .GetPacketSize());
+ .GetBytesPerBuffer());
EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
CHANNEL_LAYOUT_MONO, 1000, 16, 100)
- .GetPacketSize());
+ .GetBytesPerBuffer());
EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
CHANNEL_LAYOUT_STEREO, 1000, 8, 100)
- .GetPacketSize());
+ .GetBytesPerBuffer());
EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
CHANNEL_LAYOUT_MONO, 1000, 8, 200)
- .GetPacketSize());
+ .GetBytesPerBuffer());
EXPECT_EQ(800, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
CHANNEL_LAYOUT_STEREO, 1000, 16, 200)
- .GetPacketSize());
+ .GetBytesPerBuffer());
}
TEST(AudioParameters, GetBytesPerSecond) {
diff --git a/media/audio/audio_util.cc b/media/audio/audio_util.cc
index 7fcbe3c..3d22794 100644
--- a/media/audio/audio_util.cc
+++ b/media/audio/audio_util.cc
@@ -303,7 +303,7 @@ void MixStreams(void* dst,
}
}
-double GetAudioHardwareSampleRate() {
+int GetAudioHardwareSampleRate() {
#if defined(OS_MACOSX)
// Hardware sample-rate on the Mac can be configured, so we must query.
return AUAudioOutputStream::HardwareSampleRate();
@@ -311,7 +311,7 @@ double GetAudioHardwareSampleRate() {
if (!IsWASAPISupported()) {
// Fall back to Windows Wave implementation on Windows XP or lower
// and use 48kHz as default input sample rate.
- return 48000.0;
+ return 48000;
}
// Hardware sample-rate on Windows can be configured, so we must query.
@@ -321,22 +321,22 @@ double GetAudioHardwareSampleRate() {
#else
// Hardware for Linux is nearly always 48KHz.
// TODO(crogers) : return correct value in rare non-48KHz cases.
- return 48000.0;
+ return 48000;
#endif
}
-double GetAudioInputHardwareSampleRate(const std::string& device_id) {
+int GetAudioInputHardwareSampleRate(const std::string& device_id) {
// TODO(henrika): add support for device selection on all platforms.
// Only exists on Windows today.
#if defined(OS_MACOSX)
return AUAudioInputStream::HardwareSampleRate();
#elif defined(OS_WIN)
if (!IsWASAPISupported()) {
- return 48000.0;
+ return 48000;
}
return WASAPIAudioInputStream::HardwareSampleRate(device_id);
#else
- return 48000.0;
+ return 48000;
#endif
}
@@ -359,7 +359,7 @@ size_t GetAudioHardwareBufferSize() {
// This call must be done on a COM thread configured as MTA.
// TODO(tommi): http://code.google.com/p/chromium/issues/detail?id=103835.
int mixing_sample_rate =
- static_cast<int>(WASAPIAudioOutputStream::HardwareSampleRate(eConsole));
+ WASAPIAudioOutputStream::HardwareSampleRate(eConsole);
if (mixing_sample_rate == 48000)
return 480;
else if (mixing_sample_rate == 44100)
@@ -371,21 +371,21 @@ size_t GetAudioHardwareBufferSize() {
#endif
}
-uint32 GetAudioInputHardwareChannelCount(const std::string& device_id) {
+ChannelLayout GetAudioInputHardwareChannelLayout(const std::string& device_id) {
// TODO(henrika): add support for device selection on all platforms.
// Only exists on Windows today.
- enum channel_layout { MONO = 1, STEREO = 2 };
#if defined(OS_MACOSX)
- return MONO;
+ return CHANNEL_LAYOUT_MONO;
#elif defined(OS_WIN)
if (!IsWASAPISupported()) {
// Fall back to Windows Wave implementation on Windows XP or lower and
// use stereo by default.
- return STEREO;
+ return CHANNEL_LAYOUT_STEREO;
}
- return WASAPIAudioInputStream::HardwareChannelCount(device_id);
+ return WASAPIAudioInputStream::HardwareChannelCount(device_id) == 1 ?
+ CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
#else
- return STEREO;
+ return CHANNEL_LAYOUT_STEREO;
#endif
}
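The helpers above now return a ChannelLayout and an int sample rate directly, so a caller can feed them straight into AudioParameters without casts or mono/stereo guessing. A sketch of assumed caller code (device_id, the 16-bit depth, and the ~10 ms buffer size are placeholders, not values mandated by this commit):

  ChannelLayout layout = media::GetAudioInputHardwareChannelLayout(device_id);
  int sample_rate = media::GetAudioInputHardwareSampleRate(device_id);  // int now, no static_cast needed
  AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY, layout,
                         sample_rate, 16 /* bits_per_sample */,
                         sample_rate / 100 /* ~10 ms worth of frames */);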
diff --git a/media/audio/audio_util.h b/media/audio/audio_util.h
index 81f6da2..11ee38e 100644
--- a/media/audio/audio_util.h
+++ b/media/audio/audio_util.h
@@ -9,10 +9,9 @@
#include <vector>
#include "base/basictypes.h"
+#include "media/base/channel_layout.h"
#include "media/base/media_export.h"
-struct AudioParameters;
-
namespace base {
class SharedMemory;
}
@@ -92,10 +91,10 @@ MEDIA_EXPORT void InterleaveFloatToInt16(const std::vector<float*>& source,
size_t number_of_frames);
// Returns the default audio output hardware sample-rate.
-MEDIA_EXPORT double GetAudioHardwareSampleRate();
+MEDIA_EXPORT int GetAudioHardwareSampleRate();
// Returns the audio input hardware sample-rate for the specified device.
-MEDIA_EXPORT double GetAudioInputHardwareSampleRate(
+MEDIA_EXPORT int GetAudioInputHardwareSampleRate(
const std::string& device_id);
// Returns the optimal low-latency buffer size for the audio hardware.
@@ -103,8 +102,8 @@ MEDIA_EXPORT double GetAudioInputHardwareSampleRate(
// at without glitches. The buffer size is in sample-frames.
MEDIA_EXPORT size_t GetAudioHardwareBufferSize();
-// Returns the number of channels for the specified audio input device.
-MEDIA_EXPORT uint32 GetAudioInputHardwareChannelCount(
+// Returns the channel layout for the specified audio input device.
+MEDIA_EXPORT ChannelLayout GetAudioInputHardwareChannelLayout(
const std::string& device_id);
// Functions that handle data buffer passed between processes in the shared
diff --git a/media/audio/fake_audio_input_stream.cc b/media/audio/fake_audio_input_stream.cc
index f1284f1..0e4bb61 100644
--- a/media/audio/fake_audio_input_stream.cc
+++ b/media/audio/fake_audio_input_stream.cc
@@ -20,11 +20,11 @@ FakeAudioInputStream::FakeAudioInputStream(AudioManagerBase* manager,
const AudioParameters& params)
: audio_manager_(manager),
callback_(NULL),
- buffer_size_((params.channels * params.bits_per_sample *
- params.samples_per_packet) / 8),
+ buffer_size_((params.channels() * params.bits_per_sample() *
+ params.frames_per_buffer()) / 8),
thread_("FakeAudioRecordingThread"),
callback_interval_(base::TimeDelta::FromMilliseconds(
- (params.samples_per_packet * 1000) / params.sample_rate)) {
+ (params.frames_per_buffer() * 1000) / params.sample_rate())) {
}
FakeAudioInputStream::~FakeAudioInputStream() {}
diff --git a/media/audio/fake_audio_output_stream.cc b/media/audio/fake_audio_output_stream.cc
index 1831626..14a38cd 100644
--- a/media/audio/fake_audio_output_stream.cc
+++ b/media/audio/fake_audio_output_stream.cc
@@ -22,9 +22,9 @@ AudioOutputStream* FakeAudioOutputStream::MakeFakeStream(
}
bool FakeAudioOutputStream::Open() {
- if (packet_size_ < sizeof(int16))
+ if (bytes_per_buffer_ < sizeof(int16))
return false;
- buffer_.reset(new uint8[packet_size_]);
+ buffer_.reset(new uint8[bytes_per_buffer_]);
return true;
}
@@ -35,8 +35,8 @@ FakeAudioOutputStream* FakeAudioOutputStream::GetCurrentFakeStream() {
void FakeAudioOutputStream::Start(AudioSourceCallback* callback) {
callback_ = callback;
- memset(buffer_.get(), 0, packet_size_);
- callback_->OnMoreData(this, buffer_.get(), packet_size_,
+ memset(buffer_.get(), 0, bytes_per_buffer_);
+ callback_->OnMoreData(this, buffer_.get(), bytes_per_buffer_,
AudioBuffersState(0, 0));
}
@@ -62,7 +62,7 @@ FakeAudioOutputStream::FakeAudioOutputStream(AudioManagerBase* manager,
: audio_manager_(manager),
volume_(0),
callback_(NULL),
- packet_size_(params.GetPacketSize()),
+ bytes_per_buffer_(params.GetBytesPerBuffer()),
closed_(false) {
}
diff --git a/media/audio/fake_audio_output_stream.h b/media/audio/fake_audio_output_stream.h
index d0a7e82..19ed576 100644
--- a/media/audio/fake_audio_output_stream.h
+++ b/media/audio/fake_audio_output_stream.h
@@ -46,7 +46,7 @@ class MEDIA_EXPORT FakeAudioOutputStream : public AudioOutputStream {
double volume_;
AudioSourceCallback* callback_;
scoped_array<uint8> buffer_;
- uint32 packet_size_;
+ uint32 bytes_per_buffer_;
bool closed_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioOutputStream);
diff --git a/media/audio/linux/alsa_input.cc b/media/audio/linux/alsa_input.cc
index 994c065..43fe8e4d 100644
--- a/media/audio/linux/alsa_input.cc
+++ b/media/audio/linux/alsa_input.cc
@@ -29,12 +29,12 @@ AlsaPcmInputStream::AlsaPcmInputStream(AudioManagerLinux* audio_manager,
: audio_manager_(audio_manager),
device_name_(device_name),
params_(params),
- bytes_per_packet_(params.samples_per_packet *
- (params.channels * params.bits_per_sample) / 8),
+ bytes_per_buffer_(params.frames_per_buffer() *
+ (params.channels() * params.bits_per_sample()) / 8),
wrapper_(wrapper),
- packet_duration_ms_(
- (params.samples_per_packet * base::Time::kMillisecondsPerSecond) /
- params.sample_rate),
+ buffer_duration_ms_(
+ (params.frames_per_buffer() * base::Time::kMillisecondsPerSecond) /
+ params.sample_rate()),
callback_(NULL),
device_handle_(NULL),
mixer_handle_(NULL),
@@ -50,14 +50,14 @@ bool AlsaPcmInputStream::Open() {
return false; // Already open.
snd_pcm_format_t pcm_format = alsa_util::BitsToFormat(
- params_.bits_per_sample);
+ params_.bits_per_sample());
if (pcm_format == SND_PCM_FORMAT_UNKNOWN) {
LOG(WARNING) << "Unsupported bits per sample: "
- << params_.bits_per_sample;
+ << params_.bits_per_sample();
return false;
}
- uint32 latency_us = packet_duration_ms_ * kNumPacketsInRingBuffer *
+ uint32 latency_us = buffer_duration_ms_ * kNumPacketsInRingBuffer *
base::Time::kMicrosecondsPerMillisecond;
// Use the same minimum required latency as output.
@@ -66,10 +66,10 @@ bool AlsaPcmInputStream::Open() {
if (device_name_ == kAutoSelectDevice) {
const char* device_names[] = { kDefaultDevice1, kDefaultDevice2 };
for (size_t i = 0; i < arraysize(device_names); ++i) {
- device_handle_ = alsa_util::OpenCaptureDevice(wrapper_, device_names[i],
- params_.channels,
- params_.sample_rate,
- pcm_format, latency_us);
+ device_handle_ = alsa_util::OpenCaptureDevice(
+ wrapper_, device_names[i], params_.channels(),
+ params_.sample_rate(), pcm_format, latency_us);
+
if (device_handle_) {
device_name_ = device_names[i];
break;
@@ -78,13 +78,13 @@ bool AlsaPcmInputStream::Open() {
} else {
device_handle_ = alsa_util::OpenCaptureDevice(wrapper_,
device_name_.c_str(),
- params_.channels,
- params_.sample_rate,
+ params_.channels(),
+ params_.sample_rate(),
pcm_format, latency_us);
}
if (device_handle_) {
- audio_packet_.reset(new uint8[bytes_per_packet_]);
+ audio_buffer_.reset(new uint8[bytes_per_buffer_]);
// Open the microphone mixer.
mixer_handle_ = alsa_util::OpenMixer(wrapper_, device_name_);
@@ -112,11 +112,11 @@ void AlsaPcmInputStream::Start(AudioInputCallback* callback) {
if (error < 0) {
callback_ = NULL;
} else {
- // We start reading data half |packet_duration_ms_| later than when the
- // packet might have got filled, to accommodate some delays in the audio
+ // We start reading data half |buffer_duration_ms_| later than when the
+ // buffer might have got filled, to accommodate some delays in the audio
// driver. This could also give us a smooth read sequence going forward.
base::TimeDelta delay = base::TimeDelta::FromMilliseconds(
- packet_duration_ms_ + packet_duration_ms_ / 2);
+ buffer_duration_ms_ + buffer_duration_ms_ / 2);
next_read_time_ = base::Time::Now() + delay;
MessageLoop::current()->PostDelayedTask(
FROM_HERE,
@@ -176,7 +176,7 @@ void AlsaPcmInputStream::ReadAudio() {
Recover(frames);
}
- if (frames < params_.samples_per_packet) {
+ if (frames < params_.frames_per_buffer()) {
// Not enough data yet or error happened. In both cases wait for a very
// small duration before checking again.
// Even Though read callback was behind schedule, there is no data, so
@@ -187,7 +187,7 @@ void AlsaPcmInputStream::ReadAudio() {
}
base::TimeDelta next_check_time = base::TimeDelta::FromMilliseconds(
- packet_duration_ms_ / 2);
+ buffer_duration_ms_ / 2);
MessageLoop::current()->PostDelayedTask(
FROM_HERE,
base::Bind(&AlsaPcmInputStream::ReadAudio, weak_factory_.GetWeakPtr()),
@@ -195,30 +195,29 @@ void AlsaPcmInputStream::ReadAudio() {
return;
}
- int num_packets = frames / params_.samples_per_packet;
- int num_packets_read = num_packets;
- int bytes_per_frame = params_.channels * params_.bits_per_sample / 8;
+ int num_buffers = frames / params_.frames_per_buffer();
+ int num_buffers_read = num_buffers;
uint32 hardware_delay_bytes =
- static_cast<uint32>(GetCurrentDelay() * bytes_per_frame);
- while (num_packets--) {
- int frames_read = wrapper_->PcmReadi(device_handle_, audio_packet_.get(),
- params_.samples_per_packet);
- if (frames_read == params_.samples_per_packet) {
- callback_->OnData(this, audio_packet_.get(), bytes_per_packet_,
+ static_cast<uint32>(GetCurrentDelay() * params_.GetBytesPerFrame());
+ while (num_buffers--) {
+ int frames_read = wrapper_->PcmReadi(device_handle_, audio_buffer_.get(),
+ params_.frames_per_buffer());
+ if (frames_read == params_.frames_per_buffer()) {
+ callback_->OnData(this, audio_buffer_.get(), bytes_per_buffer_,
hardware_delay_bytes);
} else {
LOG(WARNING) << "PcmReadi returning less than expected frames: "
- << frames_read << " vs. " << params_.samples_per_packet
- << ". Dropping this packet.";
+ << frames_read << " vs. " << params_.frames_per_buffer()
+ << ". Dropping this buffer.";
}
}
next_read_time_ += base::TimeDelta::FromMilliseconds(
- packet_duration_ms_ * num_packets_read);
+ buffer_duration_ms_ * num_buffers_read);
base::TimeDelta delay = next_read_time_ - base::Time::Now();
if (delay < base::TimeDelta()) {
LOG(WARNING) << "Audio read callback behind schedule by "
- << (packet_duration_ms_ - delay.InMilliseconds())
+ << (buffer_duration_ms_ - delay.InMilliseconds())
<< " (ms).";
// Read callback is behind schedule. Assuming there is data pending in
// the soundcard, invoke the read callback immediate in order to catch up.
@@ -256,7 +255,7 @@ void AlsaPcmInputStream::Close() {
if (mixer_handle_)
alsa_util::CloseMixer(wrapper_, mixer_handle_, device_name_);
- audio_packet_.reset();
+ audio_buffer_.reset();
device_handle_ = NULL;
mixer_handle_ = NULL;
mixer_element_handle_ = NULL;
diff --git a/media/audio/linux/alsa_input.h b/media/audio/linux/alsa_input.h
index 5f3b467..bbe965d 100644
--- a/media/audio/linux/alsa_input.h
+++ b/media/audio/linux/alsa_input.h
@@ -51,7 +51,7 @@ class AlsaPcmInputStream : public AudioInputStream {
// Logs the error and invokes any registered callbacks.
void HandleError(const char* method, int error);
- // Reads one or more packets of audio from the device, passes on to the
+ // Reads one or more buffers of audio from the device, passes on to the
// registered callback and schedules the next read.
void ReadAudio();
@@ -69,16 +69,16 @@ class AlsaPcmInputStream : public AudioInputStream {
AudioManagerLinux* audio_manager_;
std::string device_name_;
AudioParameters params_;
- int bytes_per_packet_;
+ int bytes_per_buffer_;
AlsaWrapper* wrapper_;
- int packet_duration_ms_; // Length of each recorded packet in milliseconds.
+ int buffer_duration_ms_; // Length of each recorded buffer in milliseconds.
AudioInputCallback* callback_; // Valid during a recording session.
base::Time next_read_time_; // Scheduled time for the next read callback.
snd_pcm_t* device_handle_; // Handle to the ALSA PCM recording device.
snd_mixer_t* mixer_handle_; // Handle to the ALSA microphone mixer.
snd_mixer_elem_t* mixer_element_handle_; // Handle to the capture element.
base::WeakPtrFactory<AlsaPcmInputStream> weak_factory_;
- scoped_array<uint8> audio_packet_; // Buffer used for reading audio data.
+ scoped_array<uint8> audio_buffer_; // Buffer used for reading audio data.
bool read_callback_behind_schedule_;
DISALLOW_COPY_AND_ASSIGN(AlsaPcmInputStream);
diff --git a/media/audio/linux/alsa_output.cc b/media/audio/linux/alsa_output.cc
index 10b0a54..8ba86df6 100644
--- a/media/audio/linux/alsa_output.cc
+++ b/media/audio/linux/alsa_output.cc
@@ -151,15 +151,15 @@ AlsaPcmOutputStream::AlsaPcmOutputStream(const std::string& device_name,
AlsaWrapper* wrapper,
AudioManagerLinux* manager)
: requested_device_name_(device_name),
- pcm_format_(alsa_util::BitsToFormat(params.bits_per_sample)),
- channels_(params.channels),
- sample_rate_(params.sample_rate),
- bytes_per_sample_(params.bits_per_sample / 8),
- bytes_per_frame_(channels_ * params.bits_per_sample / 8),
+ pcm_format_(alsa_util::BitsToFormat(params.bits_per_sample())),
+ channels_(params.channels()),
+ sample_rate_(params.sample_rate()),
+ bytes_per_sample_(params.bits_per_sample() / 8),
+ bytes_per_frame_(channels_ * params.bits_per_sample() / 8),
should_downmix_(false),
- packet_size_(params.GetPacketSize()),
+ packet_size_(params.GetBytesPerBuffer()),
micros_per_packet_(FramesToMicros(
- params.samples_per_packet, sample_rate_)),
+ params.frames_per_buffer(), sample_rate_)),
latency_micros_(std::max(AlsaPcmOutputStream::kMinLatencyMicros,
micros_per_packet_ * 2)),
bytes_per_output_frame_(bytes_per_frame_),
@@ -176,19 +176,20 @@ AlsaPcmOutputStream::AlsaPcmOutputStream(const std::string& device_name,
DCHECK(IsOnAudioThread());
// Sanity check input values.
- if ((params.sample_rate > kAlsaMaxSampleRate) || (params.sample_rate <= 0)) {
+ if (params.sample_rate() > kAlsaMaxSampleRate ||
+ params.sample_rate() <= 0) {
LOG(WARNING) << "Unsupported audio frequency.";
TransitionTo(kInError);
}
- if (AudioParameters::AUDIO_PCM_LINEAR != params.format &&
- AudioParameters::AUDIO_PCM_LOW_LATENCY != params.format) {
+ if (AudioParameters::AUDIO_PCM_LINEAR != params.format() &&
+ AudioParameters::AUDIO_PCM_LOW_LATENCY != params.format()) {
LOG(WARNING) << "Unsupported audio format";
TransitionTo(kInError);
}
if (pcm_format_ == SND_PCM_FORMAT_UNKNOWN) {
- LOG(WARNING) << "Unsupported bits per sample: " << params.bits_per_sample;
+ LOG(WARNING) << "Unsupported bits per sample: " << params.bits_per_sample();
TransitionTo(kInError);
}
}
@@ -225,11 +226,9 @@ bool AlsaPcmOutputStream::Open() {
DVLOG(1) << "Auto-selected device: " << device_name_;
} else {
device_name_ = requested_device_name_;
- playback_handle_ = alsa_util::OpenPlaybackDevice(wrapper_,
- device_name_.c_str(),
- channels_, sample_rate_,
- pcm_format_,
- latency_micros_);
+ playback_handle_ = alsa_util::OpenPlaybackDevice(
+ wrapper_, device_name_.c_str(), channels_, sample_rate_,
+ pcm_format_, latency_micros_);
}
// Finish initializing the stream if the device was opened successfully.
@@ -557,11 +556,13 @@ void AlsaPcmOutputStream::ScheduleNextWrite(bool source_exhausted) {
}
}
-uint32 AlsaPcmOutputStream::FramesToMicros(uint32 frames, uint32 sample_rate) {
+uint32 AlsaPcmOutputStream::FramesToMicros(uint32 frames,
+ uint32 sample_rate) {
return frames * base::Time::kMicrosecondsPerSecond / sample_rate;
}
-uint32 AlsaPcmOutputStream::FramesToMillis(uint32 frames, uint32 sample_rate) {
+uint32 AlsaPcmOutputStream::FramesToMillis(uint32 frames,
+ uint32 sample_rate) {
return frames * base::Time::kMillisecondsPerSecond / sample_rate;
}
@@ -707,17 +708,17 @@ snd_pcm_t* AlsaPcmOutputStream::AutoSelectDevice(unsigned int latency) {
// Step 3.
device_name_ = kDefaultDevice;
- if ((handle = alsa_util::OpenPlaybackDevice(wrapper_, device_name_.c_str(),
- default_channels, sample_rate_,
- pcm_format_, latency)) != NULL) {
+ if ((handle = alsa_util::OpenPlaybackDevice(
+ wrapper_, device_name_.c_str(), default_channels, sample_rate_,
+ pcm_format_, latency)) != NULL) {
return handle;
}
// Step 4.
device_name_ = kPlugPrefix + device_name_;
- if ((handle = alsa_util::OpenPlaybackDevice(wrapper_, device_name_.c_str(),
- default_channels, sample_rate_,
- pcm_format_, latency)) != NULL) {
+ if ((handle = alsa_util::OpenPlaybackDevice(
+ wrapper_, device_name_.c_str(), default_channels, sample_rate_,
+ pcm_format_, latency)) != NULL) {
return handle;
}
diff --git a/media/audio/linux/alsa_output_unittest.cc b/media/audio/linux/alsa_output_unittest.cc
index 8658996..adc06e4 100644
--- a/media/audio/linux/alsa_output_unittest.cc
+++ b/media/audio/linux/alsa_output_unittest.cc
@@ -658,11 +658,11 @@ TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_DeviceSelect) {
CHANNEL_LAYOUT_MONO,
CHANNEL_LAYOUT_STEREO,
CHANNEL_LAYOUT_SURROUND,
- CHANNEL_LAYOUT_4POINT0,
- CHANNEL_LAYOUT_5POINT0,
- CHANNEL_LAYOUT_5POINT1,
- CHANNEL_LAYOUT_7POINT0,
- CHANNEL_LAYOUT_7POINT1 };
+ CHANNEL_LAYOUT_4_0,
+ CHANNEL_LAYOUT_5_0,
+ CHANNEL_LAYOUT_5_1,
+ CHANNEL_LAYOUT_7_0,
+ CHANNEL_LAYOUT_7_1 };
for (int i = 1; i < 9; ++i) {
@@ -746,7 +746,7 @@ TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_FallbackDevices) {
EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(fourth_try.c_str()), _, _))
.WillOnce(Return(kTestFailedErrno));
- AlsaPcmOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_5POINT0);
+ AlsaPcmOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_5_0);
EXPECT_FALSE(test_stream->AutoSelectDevice(5));
test_stream->Close();
}
@@ -765,7 +765,7 @@ TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_HintFail) {
EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
.WillOnce(Return(kDummyMessage));
- AlsaPcmOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_5POINT0);
+ AlsaPcmOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_5_0);
EXPECT_TRUE(test_stream->AutoSelectDevice(5));
EXPECT_TRUE(test_stream->should_downmix_);
test_stream->Close();
diff --git a/media/audio/linux/audio_manager_linux.cc b/media/audio/linux/audio_manager_linux.cc
index faf9300..c081c03 100644
--- a/media/audio/linux/audio_manager_linux.cc
+++ b/media/audio/linux/audio_manager_linux.cc
@@ -238,25 +238,25 @@ bool AudioManagerLinux::HasAnyAlsaAudioDevice(StreamType stream) {
AudioOutputStream* AudioManagerLinux::MakeLinearOutputStream(
const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
return MakeOutputStream(params);
}
AudioOutputStream* AudioManagerLinux::MakeLowLatencyOutputStream(
const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
return MakeOutputStream(params);
}
AudioInputStream* AudioManagerLinux::MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
return MakeInputStream(params, device_id);
}
AudioInputStream* AudioManagerLinux::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
return MakeInputStream(params, device_id);
}
diff --git a/media/audio/mac/audio_input_mac.cc b/media/audio/mac/audio_input_mac.cc
index e886ddf..61ab281 100644
--- a/media/audio/mac/audio_input_mac.cc
+++ b/media/audio/mac/audio_input_mac.cc
@@ -30,18 +30,18 @@ PCMQueueInAudioInputStream::PCMQueueInAudioInputStream(
// A frame is one sample across all channels. In interleaved audio the per
// frame fields identify the set of n |channels|. In uncompressed audio, a
// packet is always one frame.
- format_.mSampleRate = params.sample_rate;
+ format_.mSampleRate = params.sample_rate();
format_.mFormatID = kAudioFormatLinearPCM;
format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample;
- format_.mChannelsPerFrame = params.channels;
+ format_.mBitsPerChannel = params.bits_per_sample();
+ format_.mChannelsPerFrame = params.channels();
format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (params.bits_per_sample * params.channels) / 8;
+ format_.mBytesPerPacket = (params.bits_per_sample() * params.channels()) / 8;
format_.mBytesPerFrame = format_.mBytesPerPacket;
format_.mReserved = 0;
- buffer_size_bytes_ = params.GetPacketSize();
+ buffer_size_bytes_ = params.GetBytesPerBuffer();
}
PCMQueueInAudioInputStream::~PCMQueueInAudioInputStream() {
diff --git a/media/audio/mac/audio_low_latency_input_mac.cc b/media/audio/mac/audio_low_latency_input_mac.cc
index 22b6df5..ddd12ed 100644
--- a/media/audio/mac/audio_low_latency_input_mac.cc
+++ b/media/audio/mac/audio_low_latency_input_mac.cc
@@ -42,22 +42,22 @@ AUAudioInputStream::AUAudioInputStream(
DCHECK(manager_);
// Set up the desired (output) format specified by the client.
- format_.mSampleRate = params.sample_rate;
+ format_.mSampleRate = params.sample_rate();
format_.mFormatID = kAudioFormatLinearPCM;
format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample;
- format_.mChannelsPerFrame = params.channels;
+ format_.mBitsPerChannel = params.bits_per_sample();
+ format_.mChannelsPerFrame = params.channels();
format_.mFramesPerPacket = 1; // uncompressed audio
format_.mBytesPerPacket = (format_.mBitsPerChannel *
- params.channels) / 8;
+ params.channels()) / 8;
format_.mBytesPerFrame = format_.mBytesPerPacket;
format_.mReserved = 0;
DVLOG(1) << "Desired ouput format: " << format_;
// Calculate the number of sample frames per callback.
- number_of_frames_ = params.GetPacketSize() / format_.mBytesPerPacket;
+ number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
DVLOG(1) << "Number of frames per callback: " << number_of_frames_;
// Derive size (in bytes) of the buffers that we will render to.
@@ -71,7 +71,7 @@ AUAudioInputStream::AUAudioInputStream(
audio_buffer_list_.mNumberBuffers = 1;
AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers;
- audio_buffer->mNumberChannels = params.channels;
+ audio_buffer->mNumberChannels = params.channels();
audio_buffer->mDataByteSize = data_byte_size;
audio_buffer->mData = audio_data_buffer_.get();
}
@@ -446,7 +446,7 @@ OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames,
return noErr;
}
-double AUAudioInputStream::HardwareSampleRate() {
+int AUAudioInputStream::HardwareSampleRate() {
// Determine the default input device's sample-rate.
AudioDeviceID device_id = kAudioObjectUnknown;
UInt32 info_size = sizeof(device_id);
@@ -484,7 +484,7 @@ double AUAudioInputStream::HardwareSampleRate() {
if (result)
return 0.0;
- return nominal_sample_rate;
+ return static_cast<int>(nominal_sample_rate);
}
double AUAudioInputStream::GetHardwareLatency() {
diff --git a/media/audio/mac/audio_low_latency_input_mac.h b/media/audio/mac/audio_low_latency_input_mac.h
index 52609d6..6d1b899 100644
--- a/media/audio/mac/audio_low_latency_input_mac.h
+++ b/media/audio/mac/audio_low_latency_input_mac.h
@@ -66,7 +66,7 @@ class AUAudioInputStream : public AudioInputStream {
virtual double GetVolume() OVERRIDE;
// Returns the current hardware sample rate for the default input device.
- MEDIA_EXPORT static double HardwareSampleRate();
+ MEDIA_EXPORT static int HardwareSampleRate();
bool started() const { return started_; }
AudioUnit audio_unit() { return audio_unit_; }
diff --git a/media/audio/mac/audio_low_latency_output_mac.cc b/media/audio/mac/audio_low_latency_output_mac.cc
index f6fabde..af23e67 100644
--- a/media/audio/mac/audio_low_latency_output_mac.cc
+++ b/media/audio/mac/audio_low_latency_output_mac.cc
@@ -56,19 +56,19 @@ AUAudioOutputStream::AUAudioOutputStream(
// A frame is one sample across all channels. In interleaved audio the per
// frame fields identify the set of n |channels|. In uncompressed audio, a
// packet is always one frame.
- format_.mSampleRate = params.sample_rate;
+ format_.mSampleRate = params.sample_rate();
format_.mFormatID = kAudioFormatLinearPCM;
format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample;
- format_.mChannelsPerFrame = params.channels;
+ format_.mBitsPerChannel = params.bits_per_sample();
+ format_.mChannelsPerFrame = params.channels();
format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels) / 8;
+ format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
format_.mBytesPerFrame = format_.mBytesPerPacket;
format_.mReserved = 0;
// Calculate the number of sample frames per callback.
- number_of_frames_ = params.GetPacketSize() / format_.mBytesPerPacket;
+ number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
}
AUAudioOutputStream::~AUAudioOutputStream() {
@@ -252,7 +252,7 @@ OSStatus AUAudioOutputStream::InputProc(void* user_data,
return audio_output->Render(number_of_frames, io_data, output_time_stamp);
}
-double AUAudioOutputStream::HardwareSampleRate() {
+int AUAudioOutputStream::HardwareSampleRate() {
// Determine the default output device's sample-rate.
AudioDeviceID device_id = kAudioObjectUnknown;
UInt32 info_size = sizeof(device_id);
@@ -290,7 +290,7 @@ double AUAudioOutputStream::HardwareSampleRate() {
if (result)
return 0.0; // error
- return nominal_sample_rate;
+ return static_cast<int>(nominal_sample_rate);
}
double AUAudioOutputStream::GetHardwareLatency() {
diff --git a/media/audio/mac/audio_low_latency_output_mac.h b/media/audio/mac/audio_low_latency_output_mac.h
index fe29e90..c912c18 100644
--- a/media/audio/mac/audio_low_latency_output_mac.h
+++ b/media/audio/mac/audio_low_latency_output_mac.h
@@ -46,7 +46,7 @@ class AUAudioOutputStream : public AudioOutputStream {
virtual void SetVolume(double volume) OVERRIDE;
virtual void GetVolume(double* volume) OVERRIDE;
- static double HardwareSampleRate();
+ static int HardwareSampleRate();
private:
// DefaultOutputUnit callback.
diff --git a/media/audio/mac/audio_manager_mac.cc b/media/audio/mac/audio_manager_mac.cc
index 31b4d6d..3b1161d 100644
--- a/media/audio/mac/audio_manager_mac.cc
+++ b/media/audio/mac/audio_manager_mac.cc
@@ -261,25 +261,25 @@ void AudioManagerMac::UnMuteAll() {
AudioOutputStream* AudioManagerMac::MakeLinearOutputStream(
const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
return new PCMQueueOutAudioOutputStream(this, params);
}
AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
return new AUAudioOutputStream(this, params);
}
AudioInputStream* AudioManagerMac::MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
return new PCMQueueInAudioInputStream(this, params);
}
AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
// Gets the AudioDeviceID that refers to the AudioDevice with the device
// unique id. This AudioDeviceID is used to set the device for Audio Unit.
AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, device_id);
diff --git a/media/audio/mac/audio_output_mac.cc b/media/audio/mac/audio_output_mac.cc
index f4f4749..d2cdc57 100644
--- a/media/audio/mac/audio_output_mac.cc
+++ b/media/audio/mac/audio_output_mac.cc
@@ -48,12 +48,12 @@ PCMQueueOutAudioOutputStream::PCMQueueOutAudioOutputStream(
: audio_queue_(NULL),
source_(NULL),
manager_(manager),
- packet_size_(params.GetPacketSize()),
+ packet_size_(params.GetBytesPerBuffer()),
silence_bytes_(0),
volume_(1),
pending_bytes_(0),
- num_source_channels_(params.channels),
- source_layout_(params.channel_layout),
+ num_source_channels_(params.channels()),
+ source_layout_(params.channel_layout()),
num_core_channels_(0),
should_swizzle_(false),
should_down_mix_(false) {
@@ -62,13 +62,13 @@ PCMQueueOutAudioOutputStream::PCMQueueOutAudioOutputStream(
// A frame is one sample across all channels. In interleaved audio the per
// frame fields identify the set of n |channels|. In uncompressed audio, a
// packet is always one frame.
- format_.mSampleRate = params.sample_rate;
+ format_.mSampleRate = params.sample_rate();
format_.mFormatID = kAudioFormatLinearPCM;
format_.mFormatFlags = kLinearPCMFormatFlagIsPacked;
- format_.mBitsPerChannel = params.bits_per_sample;
- format_.mChannelsPerFrame = params.channels;
+ format_.mBitsPerChannel = params.bits_per_sample();
+ format_.mChannelsPerFrame = params.channels();
format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels) / 8;
+ format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
format_.mBytesPerFrame = format_.mBytesPerPacket;
format_.mReserved = 0;
@@ -76,14 +76,14 @@ PCMQueueOutAudioOutputStream::PCMQueueOutAudioOutputStream(
memset(core_channel_orderings_, 0, sizeof(core_channel_orderings_));
memset(channel_remap_, 0, sizeof(channel_remap_));
- if (params.bits_per_sample > 8) {
+ if (params.bits_per_sample() > 8) {
format_.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
}
// Silence buffer has a duration of 6ms to simulate the behavior of Windows.
// This value is choosen by experiments and macs cannot keep up with
// anything less than 6ms.
- silence_bytes_ = format_.mBytesPerFrame * params.sample_rate * 6 / 1000;
+ silence_bytes_ = format_.mBytesPerFrame * params.sample_rate() * 6 / 1000;
}
PCMQueueOutAudioOutputStream::~PCMQueueOutAudioOutputStream() {
diff --git a/media/audio/pulse/pulse_output.h b/media/audio/pulse/pulse_output.h
index d3ef866..410ffb3 100644
--- a/media/audio/pulse/pulse_output.h
+++ b/media/audio/pulse/pulse_output.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
@@ -40,7 +40,7 @@ typedef AudioManagerOpenBSD AudioManagerPulse;
#error Unsupported platform
#endif
-struct AudioParameters;
+class AudioParameters;
class PulseAudioOutputStream : public AudioOutputStream {
public:
diff --git a/media/audio/simple_sources_unittest.cc b/media/audio/simple_sources_unittest.cc
index 09e4ce9..384ed0b 100644
--- a/media/audio/simple_sources_unittest.cc
+++ b/media/audio/simple_sources_unittest.cc
@@ -71,7 +71,7 @@ TEST(SimpleSources, SineWaveAudio16MonoTest) {
scoped_ptr<AudioManager> audio_man(AudioManager::Create());
AudioParameters params(
AudioParameters::AUDIO_MOCK, CHANNEL_LAYOUT_MONO,
- AudioParameters::kTelephoneSampleRate, bytes_per_sample * 2, samples);
+ AudioParameters::kTelephoneSampleRate, bytes_per_sample * 8, samples);
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(params);
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
diff --git a/media/audio/win/audio_low_latency_input_win.cc b/media/audio/win/audio_low_latency_input_win.cc
index 839ace8..8344aba 100644
--- a/media/audio/win/audio_low_latency_input_win.cc
+++ b/media/audio/win/audio_low_latency_input_win.cc
@@ -32,10 +32,10 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(
DCHECK(avrt_init) << "Failed to load the Avrt.dll";
// Set up the desired capture format specified by the client.
- format_.nSamplesPerSec = params.sample_rate;
+ format_.nSamplesPerSec = params.sample_rate();
format_.wFormatTag = WAVE_FORMAT_PCM;
- format_.wBitsPerSample = params.bits_per_sample;
- format_.nChannels = params.channels;
+ format_.wBitsPerSample = params.bits_per_sample();
+ format_.nChannels = params.channels();
format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
format_.cbSize = 0;
@@ -44,8 +44,8 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(
frame_size_ = format_.nBlockAlign;
// Store size of audio packets which we expect to get from the audio
// endpoint device in each capture event.
- packet_size_frames_ = params.GetPacketSize() / format_.nBlockAlign;
- packet_size_bytes_ = params.GetPacketSize();
+ packet_size_frames_ = params.GetBytesPerBuffer() / format_.nBlockAlign;
+ packet_size_bytes_ = params.GetBytesPerBuffer();
DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
@@ -60,7 +60,7 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(
stop_capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
DCHECK(stop_capture_event_.IsValid());
- ms_to_frame_count_ = static_cast<double>(params.sample_rate) / 1000.0;
+ ms_to_frame_count_ = static_cast<double>(params.sample_rate()) / 1000.0;
LARGE_INTEGER performance_frequency;
if (QueryPerformanceFrequency(&performance_frequency)) {
@@ -221,14 +221,14 @@ double WASAPIAudioInputStream::GetVolume() {
}
// static
-double WASAPIAudioInputStream::HardwareSampleRate(
+int WASAPIAudioInputStream::HardwareSampleRate(
const std::string& device_id) {
base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
if (FAILED(hr))
- return 0.0;
+ return 0;
- return static_cast<double>(audio_engine_mix_format->nSamplesPerSec);
+ return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
}
// static
diff --git a/media/audio/win/audio_low_latency_input_win.h b/media/audio/win/audio_low_latency_input_win.h
index d34c025..276a342 100644
--- a/media/audio/win/audio_low_latency_input_win.h
+++ b/media/audio/win/audio_low_latency_input_win.h
@@ -105,7 +105,7 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// Retrieves the sample rate used by the audio engine for its internal
// processing/mixing of shared-mode streams given a specifed device.
- static double HardwareSampleRate(const std::string& device_id);
+ static int HardwareSampleRate(const std::string& device_id);
// Retrieves the number of audio channels used by the audio engine for its
// internal processing/mixing of shared-mode streams given a specifed device.
diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc
index 994cbb6..28dbac8 100644
--- a/media/audio/win/audio_low_latency_output_win.cc
+++ b/media/audio/win/audio_low_latency_output_win.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -39,10 +39,10 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
DCHECK(avrt_init) << "Failed to load the avrt.dll";
// Set up the desired render format specified by the client.
- format_.nSamplesPerSec = params.sample_rate;
+ format_.nSamplesPerSec = params.sample_rate();
format_.wFormatTag = WAVE_FORMAT_PCM;
- format_.wBitsPerSample = params.bits_per_sample;
- format_.nChannels = params.channels;
+ format_.wBitsPerSample = params.bits_per_sample();
+ format_.nChannels = params.channels();
format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
format_.cbSize = 0;
@@ -52,9 +52,9 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
// Store size (in different units) of audio packets which we expect to
// get from the audio endpoint device in each render event.
- packet_size_frames_ = params.GetPacketSize() / format_.nBlockAlign;
- packet_size_bytes_ = params.GetPacketSize();
- packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate;
+ packet_size_frames_ = params.GetBytesPerBuffer() / format_.nBlockAlign;
+ packet_size_bytes_ = params.GetBytesPerBuffer();
+ packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate();
DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_;
@@ -253,7 +253,7 @@ void WASAPIAudioOutputStream::GetVolume(double* volume) {
}
// static
-double WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
+int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
// It is assumed that this static method is called from a COM thread, i.e.,
// CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
ScopedComPtr<IMMDeviceEnumerator> enumerator;
@@ -296,7 +296,7 @@ double WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
return 0.0;
}
- return static_cast<double>(audio_engine_mix_format->nSamplesPerSec);
+ return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
}
void WASAPIAudioOutputStream::Run() {
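
The output stream additionally tracks the packet duration in milliseconds as 1000 * frames / sample rate. A small sketch of that formula with illustrative values:

// Sketch: packet_size_ms_ = (1000.0 * packet_size_frames_) / sample_rate().
#include <cstdio>

int main() {
  const int sample_rate = 48000;
  const int packet_size_frames = 480;  // one 10 ms buffer at 48 kHz
  const double packet_size_ms =
      (1000.0 * packet_size_frames) / sample_rate;
  std::printf("packet duration: %.1f ms\n", packet_size_ms);  // prints 10.0
  return 0;
}
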
diff --git a/media/audio/win/audio_low_latency_output_win.h b/media/audio/win/audio_low_latency_output_win.h
index cc844a0..63a1c52 100644
--- a/media/audio/win/audio_low_latency_output_win.h
+++ b/media/audio/win/audio_low_latency_output_win.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
@@ -161,7 +161,7 @@ class MEDIA_EXPORT WASAPIAudioOutputStream
// Retrieves the stream format that the audio engine uses for its internal
// processing/mixing of shared-mode streams.
- static double HardwareSampleRate(ERole device_role);
+ static int HardwareSampleRate(ERole device_role);
bool started() const { return started_; }
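
With both WASAPI streams now reporting the hardware mix rate as an int (0 on failure), call sites no longer need to narrow a double themselves. A hedged caller-side sketch, not part of this change; the <mmdeviceapi.h> include and the fallback value are assumptions:

// Caller-side sketch. Assumes the calling thread has already initialized COM,
// as HardwareSampleRate() requires, and that the class is reachable via the
// header path shown in this diff.
#include <mmdeviceapi.h>  // ERole / eConsole
#include "media/audio/win/audio_low_latency_output_win.h"

int QueryHardwareOutputRate() {
  // Before this change the return type was double and call sites narrowed it:
  //   int rate = static_cast<int>(
  //       WASAPIAudioOutputStream::HardwareSampleRate(eConsole));
  int rate = WASAPIAudioOutputStream::HardwareSampleRate(eConsole);
  if (rate == 0) {
    // 0 now signals failure (previously 0.0); pick a conservative fallback.
    rate = 48000;
  }
  return rate;
}
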
diff --git a/media/audio/win/audio_manager_win.cc b/media/audio/win/audio_manager_win.cc
index 7aebfe0..1d19fe1 100644
--- a/media/audio/win/audio_manager_win.cc
+++ b/media/audio/win/audio_manager_win.cc
@@ -238,8 +238,8 @@ void AudioManagerWin::GetAudioInputDeviceNames(
// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
- if (params.channels > kWinMaxChannels)
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ if (params.channels() > kWinMaxChannels)
return NULL;
return new PCMWaveOutAudioOutputStream(this, params, 3, WAVE_MAPPER);
@@ -252,8 +252,8 @@ AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
// - WASAPIAudioOutputStream: Based on Core Audio (WASAPI) API.
AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
- if (params.channels > kWinMaxChannels)
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ if (params.channels() > kWinMaxChannels)
return NULL;
AudioOutputStream* stream = NULL;
@@ -274,7 +274,7 @@ AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
// mode.
AudioInputStream* AudioManagerWin::MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
return CreatePCMWaveInAudioInputStream(params, device_id);
}
@@ -282,7 +282,7 @@ AudioInputStream* AudioManagerWin::MakeLinearInputStream(
// AUDIO_PCM_LOW_LATENCY mode.
AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
AudioInputStream* stream = NULL;
if (!media::IsWASAPISupported()) {
// Fall back to Windows Wave implementation on Windows XP or lower.
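
The manager keeps the same policy through the new accessors: require the expected format and refuse streams with more channels than the Windows backends support. A condensed sketch of that guard; the channel cap below is a placeholder for kWinMaxChannels, which is defined elsewhere in audio_manager_win.cc:

// Sketch of the stream-creation guard, using only the format() and channels()
// accessors confirmed by this patch.
#include "media/audio/audio_parameters.h"

bool CanMakeLinearOutputStream(const AudioParameters& params) {
  const int kMaxChannelsForSketch = 2;  // placeholder for kWinMaxChannels
  if (params.format() != AudioParameters::AUDIO_PCM_LINEAR)
    return false;  // the real code DCHECKs the format instead of returning
  return params.channels() <= kMaxChannelsForSketch;
}
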
diff --git a/media/audio/win/wavein_input_win.cc b/media/audio/win/wavein_input_win.cc
index cd9febc..f8a4e7d 100644
--- a/media/audio/win/wavein_input_win.cc
+++ b/media/audio/win/wavein_input_win.cc
@@ -36,15 +36,15 @@ PCMWaveInAudioInputStream::PCMWaveInAudioInputStream(
callback_(NULL),
num_buffers_(num_buffers),
buffer_(NULL),
- channels_(params.channels) {
+ channels_(params.channels()) {
format_.wFormatTag = WAVE_FORMAT_PCM;
- format_.nChannels = params.channels > 2 ? 2 : params.channels;
- format_.nSamplesPerSec = params.sample_rate;
- format_.wBitsPerSample = params.bits_per_sample;
+ format_.nChannels = params.channels() > 2 ? 2 : params.channels();
+ format_.nSamplesPerSec = params.sample_rate();
+ format_.wBitsPerSample = params.bits_per_sample();
format_.cbSize = 0;
format_.nBlockAlign = (format_.nChannels * format_.wBitsPerSample) / 8;
format_.nAvgBytesPerSec = format_.nBlockAlign * format_.nSamplesPerSec;
- buffer_size_ = params.samples_per_packet * format_.nBlockAlign;
+ buffer_size_ = params.frames_per_buffer() * format_.nBlockAlign;
// If we don't have a packet size we use 100ms.
if (!buffer_size_)
buffer_size_ = format_.nAvgBytesPerSec / 10;
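
The waveIn stream sizes its buffer from frames_per_buffer() and falls back to 100 ms of audio when no buffer size was requested. The same arithmetic as a standalone sketch with illustrative values:

// Sketch: buffer_size = frames_per_buffer * block_align, falling back to
// 100 ms of audio (nAvgBytesPerSec / 10) when frames_per_buffer is 0.
#include <cassert>

int main() {
  const int channels = 2;
  const int bits_per_sample = 16;
  const int sample_rate = 44100;

  const int block_align = (channels * bits_per_sample) / 8;  // 4 bytes/frame
  const int avg_bytes_per_sec = block_align * sample_rate;   // 176400

  int frames_per_buffer = 0;  // "no packet size" case
  int buffer_size = frames_per_buffer * block_align;
  if (!buffer_size)
    buffer_size = avg_bytes_per_sec / 10;  // 100 ms of audio

  assert(buffer_size == 17640);  // 4410 frames * 4 bytes per frame
  return 0;
}
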
diff --git a/media/audio/win/waveout_output_win.cc b/media/audio/win/waveout_output_win.cc
index 35fd63a..c8c7523 100644
--- a/media/audio/win/waveout_output_win.cc
+++ b/media/audio/win/waveout_output_win.cc
@@ -84,27 +84,27 @@ PCMWaveOutAudioOutputStream::PCMWaveOutAudioOutputStream(
waveout_(NULL),
callback_(NULL),
num_buffers_(num_buffers),
- buffer_size_(params.GetPacketSize()),
+ buffer_size_(params.GetBytesPerBuffer()),
volume_(1),
- channels_(params.channels),
+ channels_(params.channels()),
pending_bytes_(0) {
format_.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- format_.Format.nChannels = params.channels;
- format_.Format.nSamplesPerSec = params.sample_rate;
- format_.Format.wBitsPerSample = params.bits_per_sample;
+ format_.Format.nChannels = params.channels();
+ format_.Format.nSamplesPerSec = params.sample_rate();
+ format_.Format.wBitsPerSample = params.bits_per_sample();
format_.Format.cbSize = sizeof(format_) - sizeof(WAVEFORMATEX);
// The next are computed from above.
format_.Format.nBlockAlign = (format_.Format.nChannels *
format_.Format.wBitsPerSample) / 8;
format_.Format.nAvgBytesPerSec = format_.Format.nBlockAlign *
format_.Format.nSamplesPerSec;
- if (params.channels > kMaxChannelsToMask) {
+ if (params.channels() > kMaxChannelsToMask) {
format_.dwChannelMask = kChannelsToMask[kMaxChannelsToMask];
} else {
- format_.dwChannelMask = kChannelsToMask[params.channels];
+ format_.dwChannelMask = kChannelsToMask[params.channels()];
}
format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
- format_.Samples.wValidBitsPerSample = params.bits_per_sample;
+ format_.Samples.wValidBitsPerSample = params.bits_per_sample();
}
PCMWaveOutAudioOutputStream::~PCMWaveOutAudioOutputStream() {
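
For WAVE_FORMAT_EXTENSIBLE the waveOut stream also selects dwChannelMask from a lookup table, clamping at kMaxChannelsToMask. A sketch of that clamping pattern; the mask values and table size below are illustrative, not the real kChannelsToMask table:

// Sketch of the dwChannelMask clamp used in the constructor above.
#include <cassert>
#include <stdint.h>

uint32_t ChannelMaskForSketch(int channels) {
  static const uint32_t kMasks[] = {
      0x0,  // index 0 unused
      0x4,  // 1 channel:  front center
      0x3,  // 2 channels: front left | front right
  };
  const int kMaxChannelsInTable = 2;
  assert(channels >= 1);
  // Channel counts beyond the table reuse the largest known mask, mirroring
  // the params.channels() > kMaxChannelsToMask branch.
  if (channels > kMaxChannelsInTable)
    return kMasks[kMaxChannelsInTable];
  return kMasks[channels];
}

int main() {
  assert(ChannelMaskForSketch(2) == 0x3);
  assert(ChannelMaskForSketch(8) == 0x3);  // clamped to the largest entry
  return 0;
}
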
diff --git a/media/base/audio_renderer_sink.h b/media/base/audio_renderer_sink.h
index 3a7017a..01eb185 100644
--- a/media/base/audio_renderer_sink.h
+++ b/media/base/audio_renderer_sink.h
@@ -43,10 +43,7 @@ class AudioRendererSink
// Sets important information about the audio stream format.
// It must be called before any of the other methods.
- virtual void Initialize(size_t buffer_size,
- int channels,
- double sample_rate,
- AudioParameters::Format latency_format,
+ virtual void Initialize(const AudioParameters& params,
RenderCallback* callback) = 0;
// Starts audio playback.
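
This interface change is the main motivation for making AudioParameters a class: the four loose Initialize() arguments collapse into a single parameters object passed alongside the render callback. A self-contained illustration with stand-in types; the real AudioParameters constructor is not shown in these hunks:

// Illustration only: stand-in types, not the real media/ classes.
struct SketchParams {
  int format;             // stands in for AudioParameters::Format
  int channels;
  int sample_rate;        // Hz
  int bits_per_sample;
  int frames_per_buffer;
};

// Mirrors the new Initialize(const AudioParameters&, RenderCallback*) shape.
void InitializeSink(const SketchParams& params) {
  // A real sink would cache params and size its buffers from
  // params.frames_per_buffer and friends.
  (void)params;
}

int main() {
  SketchParams params = {
      /*format=*/0, /*channels=*/2, /*sample_rate=*/48000,
      /*bits_per_sample=*/16, /*frames_per_buffer=*/480};
  // Old call sites passed buffer_size, channels, sample_rate and a latency
  // format as four separate arguments; now the whole stream description
  // travels as one value next to the callback.
  InitializeSink(params);
  return 0;
}
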
diff --git a/media/base/channel_layout.h b/media/base/channel_layout.h
index 7dd1ec8..6a7d1ad 100644
--- a/media/base/channel_layout.h
+++ b/media/base/channel_layout.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -24,7 +24,7 @@ enum ChannelLayout {
CHANNEL_LAYOUT_SURROUND,
// Front L, Front R, Front C, Back C
- CHANNEL_LAYOUT_4POINT0,
+ CHANNEL_LAYOUT_4_0,
// Front L, Front R, Side L, Side R
CHANNEL_LAYOUT_2_2,
@@ -33,25 +33,25 @@ enum ChannelLayout {
CHANNEL_LAYOUT_QUAD,
// Front L, Front R, Front C, Side L, Side R
- CHANNEL_LAYOUT_5POINT0,
+ CHANNEL_LAYOUT_5_0,
// Front L, Front R, Front C, Side L, Side R, LFE
- CHANNEL_LAYOUT_5POINT1,
+ CHANNEL_LAYOUT_5_1,
// Front L, Front R, Front C, Back L, Back R
- CHANNEL_LAYOUT_5POINT0_BACK,
+ CHANNEL_LAYOUT_5_0_BACK,
// Front L, Front R, Front C, Back L, Back R, LFE
- CHANNEL_LAYOUT_5POINT1_BACK,
+ CHANNEL_LAYOUT_5_1_BACK,
// Front L, Front R, Front C, Side L, Side R, Back L, Back R
- CHANNEL_LAYOUT_7POINT0,
+ CHANNEL_LAYOUT_7_0,
// Front L, Front R, Front C, Side L, Side R, LFE, Back L, Back R
- CHANNEL_LAYOUT_7POINT1,
+ CHANNEL_LAYOUT_7_1,
// Front L, Front R, Front C, Back L, Back R, LFE, Front LofC, Front RofC
- CHANNEL_LAYOUT_7POINT1_WIDE,
+ CHANNEL_LAYOUT_7_1_WIDE,
// Stereo L, Stereo R
CHANNEL_LAYOUT_STEREO_DOWNMIX,
diff --git a/media/ffmpeg/ffmpeg_common.cc b/media/ffmpeg/ffmpeg_common.cc
index 978aff0..72683a4 100644
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -298,25 +298,25 @@ ChannelLayout ChannelLayoutToChromeChannelLayout(int64_t layout,
case AV_CH_LAYOUT_SURROUND:
return CHANNEL_LAYOUT_SURROUND;
case AV_CH_LAYOUT_4POINT0:
- return CHANNEL_LAYOUT_4POINT0;
+ return CHANNEL_LAYOUT_4_0;
case AV_CH_LAYOUT_2_2:
return CHANNEL_LAYOUT_2_2;
case AV_CH_LAYOUT_QUAD:
return CHANNEL_LAYOUT_QUAD;
case AV_CH_LAYOUT_5POINT0:
- return CHANNEL_LAYOUT_5POINT0;
+ return CHANNEL_LAYOUT_5_0;
case AV_CH_LAYOUT_5POINT1:
- return CHANNEL_LAYOUT_5POINT1;
+ return CHANNEL_LAYOUT_5_1;
case AV_CH_LAYOUT_5POINT0_BACK:
- return CHANNEL_LAYOUT_5POINT0_BACK;
+ return CHANNEL_LAYOUT_5_0_BACK;
case AV_CH_LAYOUT_5POINT1_BACK:
- return CHANNEL_LAYOUT_5POINT1_BACK;
+ return CHANNEL_LAYOUT_5_1_BACK;
case AV_CH_LAYOUT_7POINT0:
- return CHANNEL_LAYOUT_7POINT0;
+ return CHANNEL_LAYOUT_7_0;
case AV_CH_LAYOUT_7POINT1:
- return CHANNEL_LAYOUT_7POINT1;
+ return CHANNEL_LAYOUT_7_1;
case AV_CH_LAYOUT_7POINT1_WIDE:
- return CHANNEL_LAYOUT_7POINT1_WIDE;
+ return CHANNEL_LAYOUT_7_1_WIDE;
case AV_CH_LAYOUT_STEREO_DOWNMIX:
return CHANNEL_LAYOUT_STEREO_DOWNMIX;
default: