diff options
author | crogers@google.com <crogers@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-02-07 00:54:10 +0000 |
---|---|---|
committer | crogers@google.com <crogers@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-02-07 00:54:10 +0000 |
commit | c158a30079b86c411ff72e0925755b1c6873fbbd (patch) | |
tree | dcd9aad135b0bb18926a57066e3efe6ba6a8d895 | |
parent | e876248ed8d704fc22a58e0b77fb96e01467558b (diff) | |
download | chromium_src-c158a30079b86c411ff72e0925755b1c6873fbbd.zip chromium_src-c158a30079b86c411ff72e0925755b1c6873fbbd.tar.gz chromium_src-c158a30079b86c411ff72e0925755b1c6873fbbd.tar.bz2 |
Plumb |input_channels| all the way to AudioManager
to support synchronized audio I/O without requiring use of the
"Web Audio Input" enable flag.
The approach taken is to include |input_channels| as part of the AudioParameters
class so that we can represent synchronized I/O streams directly without needing to separately
pass |input_channels| throughout the callstack.
Please note that we are not yet removing the "Web Audio Input"
flag until we have properly verified the input device selection from getUserMedia().
BUG=none
TEST=manual test: http://chromium.googlecode.com/svn/trunk/samples/audio/visualizer-live.html
Review URL: https://codereview.chromium.org/11878032
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@181126 0039d316-1c4b-4281-b951-d872f2087c98
28 files changed, 116 insertions, 122 deletions
diff --git a/content/browser/renderer_host/media/audio_input_renderer_host.cc b/content/browser/renderer_host/media/audio_input_renderer_host.cc index 91ea5b9..f83d19a 100644 --- a/content/browser/renderer_host/media/audio_input_renderer_host.cc +++ b/content/browser/renderer_host/media/audio_input_renderer_host.cc @@ -225,7 +225,7 @@ void AudioInputRendererHost::OnCreateStream( if (media_stream_manager_->audio_input_device_manager()-> ShouldUseFakeDevice()) { audio_params.Reset(media::AudioParameters::AUDIO_FAKE, - params.channel_layout(), params.sample_rate(), + params.channel_layout(), 0, params.sample_rate(), params.bits_per_sample(), params.frames_per_buffer()); } diff --git a/content/browser/renderer_host/media/audio_renderer_host.cc b/content/browser/renderer_host/media/audio_renderer_host.cc index 1901e2f..6c028bb 100644 --- a/content/browser/renderer_host/media/audio_renderer_host.cc +++ b/content/browser/renderer_host/media/audio_renderer_host.cc @@ -252,9 +252,10 @@ bool AudioRendererHost::OnMessageReceived(const IPC::Message& message, } void AudioRendererHost::OnCreateStream( - int stream_id, const media::AudioParameters& params, int input_channels) { + int stream_id, const media::AudioParameters& params) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); // media::AudioParameters is validated in the deserializer. + int input_channels = params.input_channels(); if (input_channels < 0 || input_channels > media::limits::kMaxChannels || LookupById(stream_id) != NULL) { diff --git a/content/browser/renderer_host/media/audio_renderer_host.h b/content/browser/renderer_host/media/audio_renderer_host.h index c90d717..f4733b2 100644 --- a/content/browser/renderer_host/media/audio_renderer_host.h +++ b/content/browser/renderer_host/media/audio_renderer_host.h @@ -109,8 +109,7 @@ class CONTENT_EXPORT AudioRendererHost // successful this object would keep an internal entry of the stream for the // required properties. 
void OnCreateStream(int stream_id, - const media::AudioParameters& params, - int input_channels); + const media::AudioParameters& params); // Track that the data for the audio stream referenced by |stream_id| is // produced by an entity in the render view referenced by |render_view_id|. diff --git a/content/browser/renderer_host/media/audio_renderer_host_unittest.cc b/content/browser/renderer_host/media/audio_renderer_host_unittest.cc index 865fd12..eaaadcc 100644 --- a/content/browser/renderer_host/media/audio_renderer_host_unittest.cc +++ b/content/browser/renderer_host/media/audio_renderer_host_unittest.cc @@ -212,8 +212,7 @@ class AudioRendererHostTest : public testing::Test { media::AudioParameters::AUDIO_FAKE, media::CHANNEL_LAYOUT_STEREO, media::AudioParameters::kAudioCDSampleRate, 16, - media::AudioParameters::kAudioCDSampleRate / 10), - 0); + media::AudioParameters::kAudioCDSampleRate / 10)); message_loop_->Run(); // Simulate the renderer process associating a stream with a render view. diff --git a/content/common/media/audio_messages.h b/content/common/media/audio_messages.h index 8977417..330a1b4 100644 --- a/content/common/media/audio_messages.h +++ b/content/common/media/audio_messages.h @@ -92,10 +92,9 @@ IPC_MESSAGE_CONTROL2(AudioInputMsg_NotifyDeviceStarted, // Messages sent from the renderer to the browser. 
// Request that got sent to browser for creating an audio output stream -IPC_MESSAGE_CONTROL3(AudioHostMsg_CreateStream, +IPC_MESSAGE_CONTROL2(AudioHostMsg_CreateStream, int /* stream_id */, - media::AudioParameters, /* params */ - int /* input_channels */) + media::AudioParameters /* params */) // Request that got sent to browser for creating an audio input stream IPC_MESSAGE_CONTROL4(AudioInputHostMsg_CreateStream, diff --git a/content/common/media/media_param_traits.cc b/content/common/media/media_param_traits.cc index 85ef57c..31ba948 100644 --- a/content/common/media/media_param_traits.cc +++ b/content/common/media/media_param_traits.cc @@ -24,23 +24,25 @@ void ParamTraits<AudioParameters>::Write(Message* m, m->WriteInt(p.bits_per_sample()); m->WriteInt(p.frames_per_buffer()); m->WriteInt(p.channels()); + m->WriteInt(p.input_channels()); } bool ParamTraits<AudioParameters>::Read(const Message* m, PickleIterator* iter, AudioParameters* r) { int format, channel_layout, sample_rate, bits_per_sample, - frames_per_buffer, channels; + frames_per_buffer, channels, input_channels; if (!m->ReadInt(iter, &format) || !m->ReadInt(iter, &channel_layout) || !m->ReadInt(iter, &sample_rate) || !m->ReadInt(iter, &bits_per_sample) || !m->ReadInt(iter, &frames_per_buffer) || - !m->ReadInt(iter, &channels)) + !m->ReadInt(iter, &channels) || + !m->ReadInt(iter, &input_channels)) return false; r->Reset(static_cast<AudioParameters::Format>(format), - static_cast<ChannelLayout>(channel_layout), + static_cast<ChannelLayout>(channel_layout), input_channels, sample_rate, bits_per_sample, frames_per_buffer); if (!r->IsValid()) return false; diff --git a/content/renderer/media/audio_message_filter.cc b/content/renderer/media/audio_message_filter.cc index 4eef36c..0b8e106 100644 --- a/content/renderer/media/audio_message_filter.cc +++ b/content/renderer/media/audio_message_filter.cc @@ -43,9 +43,8 @@ void AudioMessageFilter::RemoveDelegate(int id) { } void 
AudioMessageFilter::CreateStream(int stream_id, - const media::AudioParameters& params, - int input_channels) { - Send(new AudioHostMsg_CreateStream(stream_id, params, input_channels)); + const media::AudioParameters& params) { + Send(new AudioHostMsg_CreateStream(stream_id, params)); } void AudioMessageFilter::AssociateStreamWithProducer(int stream_id, diff --git a/content/renderer/media/audio_message_filter.h b/content/renderer/media/audio_message_filter.h index 1172607..56c22c8 100644 --- a/content/renderer/media/audio_message_filter.h +++ b/content/renderer/media/audio_message_filter.h @@ -38,8 +38,8 @@ class CONTENT_EXPORT AudioMessageFilter // media::AudioOutputIPC implementation. virtual int AddDelegate(media::AudioOutputIPCDelegate* delegate) OVERRIDE; virtual void RemoveDelegate(int id) OVERRIDE; - virtual void CreateStream(int stream_id, const media::AudioParameters& params, - int input_channels) OVERRIDE; + virtual void CreateStream(int stream_id, + const media::AudioParameters& params) OVERRIDE; virtual void PlayStream(int stream_id) OVERRIDE; virtual void PauseStream(int stream_id) OVERRIDE; virtual void FlushStream(int stream_id) OVERRIDE; diff --git a/content/renderer/media/renderer_webaudiodevice_impl.cc b/content/renderer/media/renderer_webaudiodevice_impl.cc index e81cb55..ca32103 100644 --- a/content/renderer/media/renderer_webaudiodevice_impl.cc +++ b/content/renderer/media/renderer_webaudiodevice_impl.cc @@ -22,10 +22,8 @@ namespace content { RendererWebAudioDeviceImpl::RendererWebAudioDeviceImpl( const media::AudioParameters& params, - int input_channels, WebAudioDevice::RenderCallback* callback) : params_(params), - input_channels_(input_channels), client_callback_(callback) { DCHECK(client_callback_); } @@ -41,17 +39,7 @@ void RendererWebAudioDeviceImpl::start() { return; // Already started. output_device_ = AudioDeviceFactory::NewOutputDevice(); - - // TODO(crogers): remove once we properly handle input device selection. 
- // https://code.google.com/p/chromium/issues/detail?id=147327 - if (CommandLine::ForCurrentProcess()->HasSwitch( - switches::kEnableWebAudioInput)) { - // TODO(crogers): support more than hard-coded stereo: - // https://code.google.com/p/chromium/issues/detail?id=147326 - output_device_->InitializeIO(params_, 2, this); - } else { - output_device_->InitializeIO(params_, input_channels_, this); - } + output_device_->Initialize(params_, this); // Assumption: This method is being invoked within a V8 call stack. CHECKs // will fail in the call to frameForCurrentContext() otherwise. @@ -97,20 +85,20 @@ void RendererWebAudioDeviceImpl::RenderIO(media::AudioBus* source, // Make the client callback for an I/O cycle. if (client_callback_) { // Wrap the input pointers using WebVector. - size_t input_channels = + size_t source_channels = source ? static_cast<size_t>(source->channels()) : 0; - WebVector<float*> web_audio_input_data(input_channels); - for (size_t i = 0; i < input_channels; ++i) - web_audio_input_data[i] = source->channel(i); + WebVector<float*> web_audio_source_data(source_channels); + for (size_t i = 0; i < source_channels; ++i) + web_audio_source_data[i] = source->channel(i); // Wrap the output pointers using WebVector. 
- WebVector<float*> web_audio_data( + WebVector<float*> web_audio_dest_data( static_cast<size_t>(dest->channels())); for (int i = 0; i < dest->channels(); ++i) - web_audio_data[i] = dest->channel(i); + web_audio_dest_data[i] = dest->channel(i); - client_callback_->render(web_audio_input_data, - web_audio_data, + client_callback_->render(web_audio_source_data, + web_audio_dest_data, dest->frames()); } } diff --git a/content/renderer/media/renderer_webaudiodevice_impl.h b/content/renderer/media/renderer_webaudiodevice_impl.h index 593535e..ca19d074 100644 --- a/content/renderer/media/renderer_webaudiodevice_impl.h +++ b/content/renderer/media/renderer_webaudiodevice_impl.h @@ -21,7 +21,6 @@ class RendererWebAudioDeviceImpl public media::AudioRendererSink::RenderCallback { public: RendererWebAudioDeviceImpl(const media::AudioParameters& params, - int input_channels, WebKit::WebAudioDevice::RenderCallback* callback); virtual ~RendererWebAudioDeviceImpl(); @@ -42,7 +41,6 @@ class RendererWebAudioDeviceImpl private: const media::AudioParameters params_; - int input_channels_; // Weak reference to the callback into WebKit code. WebKit::WebAudioDevice::RenderCallback* const client_callback_; diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc index a7e0b4f..976b9ed 100644 --- a/content/renderer/media/webrtc_audio_capturer.cc +++ b/content/renderer/media/webrtc_audio_capturer.cc @@ -101,7 +101,7 @@ bool WebRtcAudioCapturer::Initialize(media::ChannelLayout channel_layout, int buffer_size = GetBufferSizeForSampleRate(sample_rate); // Configure audio parameters for the default source. - params_.Reset(format, channel_layout, sample_rate, 16, buffer_size); + params_.Reset(format, channel_layout, 0, sample_rate, 16, buffer_size); // Tell all sinks which format we use. 
for (SinkList::const_iterator it = sinks_.begin(); @@ -194,6 +194,7 @@ void WebRtcAudioCapturer::SetCapturerSource( params_.Reset(params_.format(), channel_layout, + 0, sample_rate, 16, // ignored since the audio stack uses float32. buffer_size); diff --git a/content/renderer/media/webrtc_audio_renderer.cc b/content/renderer/media/webrtc_audio_renderer.cc index 1b66b4d..c04a4c1 100644 --- a/content/renderer/media/webrtc_audio_renderer.cc +++ b/content/renderer/media/webrtc_audio_renderer.cc @@ -157,7 +157,7 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) { } source_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, - channel_layout, sample_rate, 16, buffer_size); + channel_layout, 0, sample_rate, 16, buffer_size); // Set up audio parameters for the sink, i.e., the native audio output stream. // We strive to open up using native parameters to achieve best possible @@ -169,7 +169,7 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) { buffer_size = hardware_config->GetOutputBufferSize(); sink_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, - channel_layout, sample_rate, 16, buffer_size); + channel_layout, 0, sample_rate, 16, buffer_size); // Create a FIFO if re-buffering is required to match the source input with // the sink request. 
The source acts as provider here and the sink as diff --git a/content/renderer/pepper/pepper_platform_audio_input_impl.cc b/content/renderer/pepper/pepper_platform_audio_input_impl.cc index 66ce5d4..d9d7326 100644 --- a/content/renderer/pepper/pepper_platform_audio_input_impl.cc +++ b/content/renderer/pepper/pepper_platform_audio_input_impl.cc @@ -162,7 +162,8 @@ bool PepperPlatformAudioInputImpl::Initialize( client_ = client; params_.Reset(media::AudioParameters::AUDIO_PCM_LINEAR, - media::CHANNEL_LAYOUT_MONO, sample_rate, 16, frames_per_buffer); + media::CHANNEL_LAYOUT_MONO, 0, + sample_rate, 16, frames_per_buffer); if (device_id.empty()) { // Use the default device. diff --git a/content/renderer/pepper/pepper_platform_audio_output_impl.cc b/content/renderer/pepper/pepper_platform_audio_output_impl.cc index 2a6dbf6..90ec7eb 100644 --- a/content/renderer/pepper/pepper_platform_audio_output_impl.cc +++ b/content/renderer/pepper/pepper_platform_audio_output_impl.cc @@ -160,7 +160,7 @@ void PepperPlatformAudioOutputImpl::InitializeOnIOThread( stream_id_ = ipc_->AddDelegate(this); DCHECK_NE(0, stream_id_); - ipc_->CreateStream(stream_id_, params, 0); + ipc_->CreateStream(stream_id_, params); ipc_->AssociateStreamWithProducer(stream_id_, source_render_view_id); } diff --git a/content/renderer/renderer_webkitplatformsupport_impl.cc b/content/renderer/renderer_webkitplatformsupport_impl.cc index a9ee441..99c336a 100644 --- a/content/renderer/renderer_webkitplatformsupport_impl.cc +++ b/content/renderer/renderer_webkitplatformsupport_impl.cc @@ -618,10 +618,11 @@ RendererWebKitPlatformSupportImpl::createAudioDevice( } media::AudioParameters params( - media::AudioParameters::AUDIO_PCM_LOW_LATENCY, layout, + media::AudioParameters::AUDIO_PCM_LOW_LATENCY, + layout, input_channels, static_cast<int>(sample_rate), 16, buffer_size); - return new RendererWebAudioDeviceImpl(params, input_channels, callback); + return new RendererWebAudioDeviceImpl(params, callback); } 
//------------------------------------------------------------------------------ diff --git a/media/audio/audio_device_thread.cc b/media/audio/audio_device_thread.cc index 51d5ecd..c592acc 100644 --- a/media/audio/audio_device_thread.cc +++ b/media/audio/audio_device_thread.cc @@ -177,10 +177,8 @@ void AudioDeviceThread::Thread::Run() { AudioDeviceThread::Callback::Callback( const AudioParameters& audio_parameters, - int input_channels, base::SharedMemoryHandle memory, int memory_length) : audio_parameters_(audio_parameters), - input_channels_(input_channels), samples_per_ms_(audio_parameters.sample_rate() / 1000), bytes_per_ms_(audio_parameters.channels() * (audio_parameters_.bits_per_sample() / 8) * diff --git a/media/audio/audio_device_thread.h b/media/audio/audio_device_thread.h index 44dbc3a..c43a01b 100644 --- a/media/audio/audio_device_thread.h +++ b/media/audio/audio_device_thread.h @@ -37,7 +37,6 @@ class MEDIA_EXPORT AudioDeviceThread { class Callback { public: Callback(const AudioParameters& audio_parameters, - int input_channels, base::SharedMemoryHandle memory, int memory_length); virtual ~Callback(); @@ -57,7 +56,6 @@ class MEDIA_EXPORT AudioDeviceThread { // The variables are 'const' since values are calculated/set in the // constructor and must never change. 
const AudioParameters audio_parameters_; - const int input_channels_; const int samples_per_ms_; const int bytes_per_ms_; diff --git a/media/audio/audio_input_device.cc b/media/audio/audio_input_device.cc index a60d60d..5c5bcf7 100644 --- a/media/audio/audio_input_device.cc +++ b/media/audio/audio_input_device.cc @@ -303,7 +303,7 @@ AudioInputDevice::AudioThreadCallback::AudioThreadCallback( base::SharedMemoryHandle memory, int memory_length, CaptureCallback* capture_callback) - : AudioDeviceThread::Callback(audio_parameters, 0, memory, memory_length), + : AudioDeviceThread::Callback(audio_parameters, memory, memory_length), capture_callback_(capture_callback) { audio_bus_ = AudioBus::Create(audio_parameters_); } diff --git a/media/audio/audio_manager_base.cc b/media/audio/audio_manager_base.cc index 67e03c0..b453877 100644 --- a/media/audio/audio_manager_base.cc +++ b/media/audio/audio_manager_base.cc @@ -396,7 +396,8 @@ AudioParameters AudioManagerBase::GetPreferredLowLatencyOutputStreamParameters( // TODO(dalecurtis): This should include bits per channel and channel layout // eventually. 
return AudioParameters( - AudioParameters::AUDIO_PCM_LOW_LATENCY, input_params.channel_layout(), + AudioParameters::AUDIO_PCM_LOW_LATENCY, + input_params.channel_layout(), input_params.input_channels(), GetAudioHardwareSampleRate(), 16, GetAudioHardwareBufferSize()); #endif // defined(OS_IOS) } diff --git a/media/audio/audio_output_device.cc b/media/audio/audio_output_device.cc index cf4ebff..e95d21b 100644 --- a/media/audio/audio_output_device.cc +++ b/media/audio/audio_output_device.cc @@ -22,7 +22,6 @@ class AudioOutputDevice::AudioThreadCallback : public AudioDeviceThread::Callback { public: AudioThreadCallback(const AudioParameters& audio_parameters, - int input_channels, base::SharedMemoryHandle memory, int memory_length, AudioRendererSink::RenderCallback* render_callback); @@ -44,7 +43,6 @@ AudioOutputDevice::AudioOutputDevice( AudioOutputIPC* ipc, const scoped_refptr<base::MessageLoopProxy>& io_loop) : ScopedLoopObserver(io_loop), - input_channels_(0), callback_(NULL), ipc_(ipc), state_(IDLE), @@ -57,19 +55,11 @@ AudioOutputDevice::AudioOutputDevice( void AudioOutputDevice::Initialize(const AudioParameters& params, RenderCallback* callback) { DCHECK(!callback_) << "Calling Initialize() twice?"; + DCHECK(params.IsValid()); audio_parameters_ = params; callback_ = callback; } -void AudioOutputDevice::InitializeIO(const AudioParameters& params, - int input_channels, - RenderCallback* callback) { - DCHECK_GE(input_channels, 0); - DCHECK_LT(input_channels, limits::kMaxChannels); - input_channels_ = input_channels; - Initialize(params, callback); -} - AudioOutputDevice::~AudioOutputDevice() { // The current design requires that the user calls Stop() before deleting // this class. 
@@ -83,7 +73,7 @@ void AudioOutputDevice::Start() { DCHECK(callback_) << "Initialize hasn't been called"; message_loop()->PostTask(FROM_HERE, base::Bind(&AudioOutputDevice::CreateStreamOnIOThread, this, - audio_parameters_, input_channels_)); + audio_parameters_)); } void AudioOutputDevice::Stop() { @@ -119,12 +109,11 @@ bool AudioOutputDevice::SetVolume(double volume) { return true; } -void AudioOutputDevice::CreateStreamOnIOThread(const AudioParameters& params, - int input_channels) { +void AudioOutputDevice::CreateStreamOnIOThread(const AudioParameters& params) { DCHECK(message_loop()->BelongsToCurrentThread()); if (state_ == IDLE) { state_ = CREATING_STREAM; - ipc_->CreateStream(stream_id_, params, input_channels); + ipc_->CreateStream(stream_id_, params); } } @@ -237,7 +226,7 @@ void AudioOutputDevice::OnStreamCreated( DCHECK(audio_thread_.IsStopped()); audio_callback_.reset(new AudioOutputDevice::AudioThreadCallback( - audio_parameters_, input_channels_, handle, length, callback_)); + audio_parameters_, handle, length, callback_)); audio_thread_.Start(audio_callback_.get(), socket_handle, "AudioOutputDevice"); state_ = PAUSED; @@ -263,12 +252,10 @@ void AudioOutputDevice::WillDestroyCurrentMessageLoop() { AudioOutputDevice::AudioThreadCallback::AudioThreadCallback( const AudioParameters& audio_parameters, - int input_channels, base::SharedMemoryHandle memory, int memory_length, AudioRendererSink::RenderCallback* render_callback) : AudioDeviceThread::Callback(audio_parameters, - input_channels, memory, memory_length), render_callback_(render_callback) { @@ -282,9 +269,10 @@ void AudioOutputDevice::AudioThreadCallback::MapSharedMemory() { // Calculate output and input memory size. 
int output_memory_size = AudioBus::CalculateMemorySize(audio_parameters_); + int input_channels = audio_parameters_.input_channels(); int frames = audio_parameters_.frames_per_buffer(); int input_memory_size = - AudioBus::CalculateMemorySize(input_channels_, frames); + AudioBus::CalculateMemorySize(input_channels, frames); int io_size = output_memory_size + input_memory_size; @@ -293,12 +281,12 @@ void AudioOutputDevice::AudioThreadCallback::MapSharedMemory() { output_bus_ = AudioBus::WrapMemory(audio_parameters_, shared_memory_.memory()); - if (input_channels_ > 0) { + if (input_channels > 0) { // The input data is after the output data. char* input_data = static_cast<char*>(shared_memory_.memory()) + output_memory_size; input_bus_ = - AudioBus::WrapMemory(input_channels_, frames, input_data); + AudioBus::WrapMemory(input_channels, frames, input_data); } } @@ -319,9 +307,10 @@ void AudioOutputDevice::AudioThreadCallback::Process(int pending_data) { // Update the audio-delay measurement then ask client to render audio. Since // |output_bus_| is wrapping the shared memory the Render() call is writing // directly into the shared memory. + int input_channels = audio_parameters_.input_channels(); size_t num_frames = audio_parameters_.frames_per_buffer(); - if (input_bus_.get() && input_channels_ > 0) { + if (input_bus_.get() && input_channels > 0) { render_callback_->RenderIO(input_bus_.get(), output_bus_.get(), audio_delay_milliseconds); diff --git a/media/audio/audio_output_device.h b/media/audio/audio_output_device.h index 6650028..28a7098 100644 --- a/media/audio/audio_output_device.h +++ b/media/audio/audio_output_device.h @@ -80,9 +80,6 @@ class MEDIA_EXPORT AudioOutputDevice // AudioRendererSink implementation. 
virtual void Initialize(const AudioParameters& params, RenderCallback* callback) OVERRIDE; - virtual void InitializeIO(const AudioParameters& params, - int input_channels, - RenderCallback* callback) OVERRIDE; virtual void Start() OVERRIDE; virtual void Stop() OVERRIDE; virtual void Play() OVERRIDE; @@ -126,8 +123,7 @@ class MEDIA_EXPORT AudioOutputDevice // The following methods are tasks posted on the IO thread that needs to // be executed on that thread. They interact with AudioMessageFilter and // sends IPC messages on that thread. - void CreateStreamOnIOThread(const AudioParameters& params, - int input_channels); + void CreateStreamOnIOThread(const AudioParameters& params); void PlayOnIOThread(); void PauseOnIOThread(bool flush); void ShutDownOnIOThread(); @@ -139,10 +135,6 @@ class MEDIA_EXPORT AudioOutputDevice AudioParameters audio_parameters_; - // The number of optional synchronized input channels having the same - // sample-rate and buffer-size as specified in audio_parameters_. - int input_channels_; - RenderCallback* callback_; // A pointer to the IPC layer that takes care of sending requests over to diff --git a/media/audio/audio_output_device_unittest.cc b/media/audio/audio_output_device_unittest.cc index 70e2a49..07a752d 100644 --- a/media/audio/audio_output_device_unittest.cc +++ b/media/audio/audio_output_device_unittest.cc @@ -52,8 +52,8 @@ class MockAudioOutputIPC : public AudioOutputIPC { MOCK_METHOD1(AddDelegate, int(AudioOutputIPCDelegate* delegate)); MOCK_METHOD1(RemoveDelegate, void(int stream_id)); - MOCK_METHOD3(CreateStream, - void(int stream_id, const AudioParameters& params, int input_channels)); + MOCK_METHOD2(CreateStream, + void(int stream_id, const AudioParameters& params)); MOCK_METHOD1(PlayStream, void(int stream_id)); MOCK_METHOD1(CloseStream, void(int stream_id)); MOCK_METHOD2(SetVolume, void(int stream_id, double volume)); @@ -110,7 +110,7 @@ class AudioOutputDeviceTest // Must remain the first member of this class. 
base::ShadowingAtExitManager at_exit_manager_; MessageLoopForIO io_loop_; - const AudioParameters default_audio_parameters_; + AudioParameters default_audio_parameters_; StrictMock<MockRenderCallback> callback_; StrictMock<MockAudioOutputIPC> audio_output_ipc_; scoped_refptr<AudioOutputDevice> audio_device_; @@ -148,25 +148,22 @@ int AudioOutputDeviceTest::CalculateMemorySize() { } AudioOutputDeviceTest::AudioOutputDeviceTest() - : default_audio_parameters_(AudioParameters::AUDIO_PCM_LINEAR, - CHANNEL_LAYOUT_STEREO, - 48000, 16, 1024), - synchronized_io_(GetParam()), + : synchronized_io_(GetParam()), input_channels_(synchronized_io_ ? 2 : 0) { + default_audio_parameters_.Reset( + AudioParameters::AUDIO_PCM_LINEAR, + CHANNEL_LAYOUT_STEREO, input_channels_, + 48000, 16, 1024); + EXPECT_CALL(audio_output_ipc_, AddDelegate(_)) .WillOnce(Return(kStreamId)); audio_device_ = new AudioOutputDevice( &audio_output_ipc_, io_loop_.message_loop_proxy()); - if (synchronized_io_) { - audio_device_->InitializeIO(default_audio_parameters_, - input_channels_, - &callback_); - } else { - audio_device_->Initialize(default_audio_parameters_, - &callback_); - } + audio_device_->Initialize(default_audio_parameters_, + &callback_); + io_loop_.RunUntilIdle(); } @@ -179,7 +176,7 @@ AudioOutputDeviceTest::~AudioOutputDeviceTest() { void AudioOutputDeviceTest::StartAudioDevice() { audio_device_->Start(); - EXPECT_CALL(audio_output_ipc_, CreateStream(kStreamId, _, _)); + EXPECT_CALL(audio_output_ipc_, CreateStream(kStreamId, _)); io_loop_.RunUntilIdle(); } diff --git a/media/audio/audio_output_ipc.h b/media/audio/audio_output_ipc.h index 8543cdc..894ece5 100644 --- a/media/audio/audio_output_ipc.h +++ b/media/audio/audio_output_ipc.h @@ -68,13 +68,12 @@ class MEDIA_EXPORT AudioOutputIPC { // Sends a request to create an AudioOutputController object in the peer // process, identify it by |stream_id| and configure it to use the specified - // audio |params| and number of synchronized input 
channels. + // audio |params| including number of synchronized input channels. // Once the stream has been created, the implementation must // generate a notification to the AudioOutputIPCDelegate and call // OnStreamCreated(). virtual void CreateStream(int stream_id, - const AudioParameters& params, - int input_channels) = 0; + const AudioParameters& params) = 0; // Starts playing the stream. This should generate a call to // AudioOutputController::Play(). diff --git a/media/audio/audio_parameters.cc b/media/audio/audio_parameters.cc index 0d9263ff5..721dea0a 100644 --- a/media/audio/audio_parameters.cc +++ b/media/audio/audio_parameters.cc @@ -14,7 +14,8 @@ AudioParameters::AudioParameters() sample_rate_(0), bits_per_sample_(0), frames_per_buffer_(0), - channels_(0) { + channels_(0), + input_channels_(0) { } AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout, @@ -25,14 +26,30 @@ AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout, sample_rate_(sample_rate), bits_per_sample_(bits_per_sample), frames_per_buffer_(frames_per_buffer), - channels_(ChannelLayoutToChannelCount(channel_layout)) { + channels_(ChannelLayoutToChannelCount(channel_layout)), + input_channels_(0) { +} + +AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout, + int input_channels, + int sample_rate, int bits_per_sample, + int frames_per_buffer) + : format_(format), + channel_layout_(channel_layout), + sample_rate_(sample_rate), + bits_per_sample_(bits_per_sample), + frames_per_buffer_(frames_per_buffer), + channels_(ChannelLayoutToChannelCount(channel_layout)), + input_channels_(input_channels) { } void AudioParameters::Reset(Format format, ChannelLayout channel_layout, + int input_channels, int sample_rate, int bits_per_sample, int frames_per_buffer) { format_ = format; channel_layout_ = channel_layout; + input_channels_ = input_channels; sample_rate_ = sample_rate; bits_per_sample_ = bits_per_sample; frames_per_buffer_ = 
frames_per_buffer; @@ -46,6 +63,8 @@ bool AudioParameters::IsValid() const { (channels_ <= media::limits::kMaxChannels) && (channel_layout_ > CHANNEL_LAYOUT_UNSUPPORTED) && (channel_layout_ < CHANNEL_LAYOUT_MAX) && + (input_channels_ >= 0) && + (input_channels_ <= media::limits::kMaxChannels) && (sample_rate_ >= media::limits::kMinSampleRate) && (sample_rate_ <= media::limits::kMaxSampleRate) && (bits_per_sample_ > 0) && diff --git a/media/audio/audio_parameters.h b/media/audio/audio_parameters.h index 0225468..6f3c525 100644 --- a/media/audio/audio_parameters.h +++ b/media/audio/audio_parameters.h @@ -46,7 +46,12 @@ class MEDIA_EXPORT AudioParameters { AudioParameters(Format format, ChannelLayout channel_layout, int sample_rate, int bits_per_sample, int frames_per_buffer); + AudioParameters(Format format, ChannelLayout channel_layout, + int input_channels, + int sample_rate, int bits_per_sample, + int frames_per_buffer); void Reset(Format format, ChannelLayout channel_layout, + int input_channels, int sample_rate, int bits_per_sample, int frames_per_buffer); @@ -69,6 +74,7 @@ class MEDIA_EXPORT AudioParameters { int bits_per_sample() const { return bits_per_sample_; } int frames_per_buffer() const { return frames_per_buffer_; } int channels() const { return channels_; } + int input_channels() const { return input_channels_; } private: Format format_; // Format of the stream. @@ -79,6 +85,9 @@ class MEDIA_EXPORT AudioParameters { int channels_; // Number of channels. Value set based on // |channel_layout|. + int input_channels_; // Optional number of input channels. + // Normally 0, but can be set to specify + // synchronized I/O. }; // Comparison is useful when AudioParameters is used with std structures. 
@@ -87,6 +96,8 @@ inline bool operator<(const AudioParameters& a, const AudioParameters& b) { return a.format() < b.format(); if (a.channels() != b.channels()) return a.channels() < b.channels(); + if (a.input_channels() != b.input_channels()) + return a.input_channels() < b.input_channels(); if (a.sample_rate() != b.sample_rate()) return a.sample_rate() < b.sample_rate(); if (a.bits_per_sample() != b.bits_per_sample()) diff --git a/media/audio/mac/audio_manager_mac.cc b/media/audio/mac/audio_manager_mac.cc index 9b69cb7..4030a23 100644 --- a/media/audio/mac/audio_manager_mac.cc +++ b/media/audio/mac/audio_manager_mac.cc @@ -283,10 +283,12 @@ AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream( const AudioParameters& params) { DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format()); - // TODO(crogers): remove once we properly handle input device selection. + // TODO(crogers): support more than stereo input. + // TODO(crogers): remove flag once we handle input device selection. // https://code.google.com/p/chromium/issues/detail?id=147327 - if (CommandLine::ForCurrentProcess()->HasSwitch( - switches::kEnableWebAudioInput)) { + if (params.input_channels() == 2 && + CommandLine::ForCurrentProcess()->HasSwitch( + switches::kEnableWebAudioInput)) { if (HasUnifiedDefaultIO()) return new AudioHardwareUnifiedStream(this, params); @@ -328,7 +330,8 @@ AudioParameters AudioManagerMac::GetPreferredLowLatencyOutputStreamParameters( // Specifically, this is a limitation of AudioSynchronizedStream which // can be removed as part of the work to consolidate these back-ends. 
return AudioParameters( - AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO, + AudioParameters::AUDIO_PCM_LOW_LATENCY, + CHANNEL_LAYOUT_STEREO, input_params.input_channels(), GetAudioHardwareSampleRate(), 16, GetAudioHardwareBufferSize()); } diff --git a/media/audio/win/audio_manager_win.cc b/media/audio/win/audio_manager_win.cc index e62d951..f8e3ba0 100644 --- a/media/audio/win/audio_manager_win.cc +++ b/media/audio/win/audio_manager_win.cc @@ -292,8 +292,11 @@ AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream( this, params, media::NumberOfWaveOutBuffers(), WAVE_MAPPER); } - // TODO(henrika): remove once we properly handle input device selection. - if (CommandLine::ForCurrentProcess()->HasSwitch( + // TODO(crogers): support more than stereo input. + // TODO(henrika): remove flag once we properly handle input device selection. + // https://code.google.com/p/chromium/issues/detail?id=147327 + if (params.input_channels() == 2 && + CommandLine::ForCurrentProcess()->HasSwitch( switches::kEnableWebAudioInput)) { if (WASAPIUnifiedStream::HasUnifiedDefaultIO()) { DVLOG(1) << "WASAPIUnifiedStream is created."; diff --git a/media/base/audio_renderer_sink.h b/media/base/audio_renderer_sink.h index ad61c4b..51e6c1f 100644 --- a/media/base/audio_renderer_sink.h +++ b/media/base/audio_renderer_sink.h @@ -41,19 +41,15 @@ class AudioRendererSink // Sets important information about the audio stream format. // It must be called before any of the other methods. - virtual void Initialize(const AudioParameters& params, - RenderCallback* callback) = 0; - - // InitializeIO() may be called instead of Initialize() for clients who wish - // to have synchronized input and output. |input_channels| specifies the + // For clients wishing to have synchronized input and output, + // |params| may specify |input_channels| > 0, representing a // number of input channels which will be at the same sample-rate // and buffer-size as the output as specified in |params|. 
- // The callback's RenderIO() method will be called instead of Render(), - // providing the synchronized input data at the same time as when new - // output data is to be rendered. - virtual void InitializeIO(const AudioParameters& params, - int input_channels, - RenderCallback* callback) {} + // In this case, the callback's RenderIO() method will be called instead + // of Render(), providing the synchronized input data at the same time as + // when new output data is to be rendered. + virtual void Initialize(const AudioParameters& params, + RenderCallback* callback) = 0; // Starts audio playback. virtual void Start() = 0; |