summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--content/content_renderer.gypi6
-rw-r--r--content/renderer/media/webrtc_audio_device_impl.cc732
-rw-r--r--content/renderer/media/webrtc_audio_device_impl.h281
3 files changed, 1019 insertions, 0 deletions
diff --git a/content/content_renderer.gypi b/content/content_renderer.gypi
index ff966a7..3836edc 100644
--- a/content/content_renderer.gypi
+++ b/content/content_renderer.gypi
@@ -25,6 +25,10 @@
],
'include_dirs': [
'..',
+ # Adding two webrtc-includes here to avoid adding a deeper dependency than required.
+ # TODO(henrika): to be removed when "proper" WebRTC-dependency is added to this target.
+ '../third_party/webrtc',
+ '../third_party/webrtc/modules/interface',
],
'sources': [
'renderer/active_notification_tracker.cc',
@@ -75,6 +79,8 @@
'renderer/media/video_capture_impl_manager.h',
'renderer/media/video_capture_message_filter.cc',
'renderer/media/video_capture_message_filter.h',
+ 'renderer/media/webrtc_audio_device_impl.cc',
+ 'renderer/media/webrtc_audio_device_impl.h',
'renderer/navigation_state.cc',
'renderer/navigation_state.h',
'renderer/notification_provider.cc',
diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc
new file mode 100644
index 0000000..4fbcd66
--- /dev/null
+++ b/content/renderer/media/webrtc_audio_device_impl.cc
@@ -0,0 +1,732 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc_audio_device_impl.h"
+
+#include "base/string_util.h"
+#include "media/audio/audio_util.h"
+
+// TODO(henrika): come up with suitable value(s) for all platforms.
+// Max supported size for input and output buffers.
+// Unit is in #(audio frames), hence 1440 <=> 30ms @ 48kHz.
+static const size_t kMaxBufferSize = 1440;
+// Upper bound on the number of audio channels (stereo).
+static const int kMaxChannels = 2;
+// Interval requested between periodic Process() calls; see
+// TimeUntilNextProcess().
+static const int64 kMillisecondsBetweenProcessCalls = 5000;
+// Module version string reported by Version().
+static const char kVersion[] = "WebRTC AudioDevice 1.0.0.Chrome";
+
+WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl(
+    size_t input_buffer_size, size_t output_buffer_size,
+    int input_channels, int output_channels,
+    double input_sample_rate, double output_sample_rate)
+    : audio_transport_callback_(NULL),
+      last_error_(AudioDeviceModule::kAdmErrNone),
+      input_buffer_size_(input_buffer_size),
+      output_buffer_size_(output_buffer_size),
+      input_channels_(input_channels),
+      output_channels_(output_channels),
+      input_sample_rate_(input_sample_rate),
+      output_sample_rate_(output_sample_rate),
+      initialized_(false),
+      playing_(false),
+      recording_(false),
+      input_delay_ms_(0),
+      output_delay_ms_(0),
+      last_process_time_(base::TimeTicks::Now()) {
+  VLOG(1) << "WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()";
+
+  // Create an AudioInputDevice client if the requested buffer size
+  // is an even multiple of 10 milliseconds.
+  if (BufferSizeIsValid(input_buffer_size, input_sample_rate)) {
+    audio_input_device_ = new AudioInputDevice(
+        input_buffer_size,
+        input_channels,
+        input_sample_rate,
+        this);
+  }
+
+  // Create an AudioDevice client if the requested buffer size
+  // is an even multiple of 10 milliseconds.
+  if (BufferSizeIsValid(output_buffer_size, output_sample_rate)) {
+    audio_output_device_ = new AudioDevice(
+        output_buffer_size,
+        output_channels,
+        output_sample_rate,
+        this);
+  }
+  // Both device clients must exist; invalid buffer sizes are caught here
+  // in debug builds (the pointers stay NULL if creation was skipped above).
+  DCHECK(audio_input_device_);
+  DCHECK(audio_output_device_);
+
+  // Pre-allocate maximum-sized scratch buffers up front so that the
+  // real-time Render()/Capture() callbacks never have to allocate.
+  input_buffer_.reset(new int16[kMaxBufferSize * kMaxChannels]);
+  output_buffer_.reset(new int16[kMaxBufferSize * kMaxChannels]);
+
+  // Samples are stored as 16-bit PCM in the scratch buffers.
+  bytes_per_sample_ = sizeof(*input_buffer_.get());
+}
+
+WebRtcAudioDeviceImpl::~WebRtcAudioDeviceImpl() {
+  VLOG(1) << "WebRtcAudioDeviceImpl::~WebRtcAudioDeviceImpl()";
+  // Stop any active streams and de-initialize before the members are
+  // destroyed; all three calls are guarded and safe to skip when inactive.
+  if (playing_)
+    StopPlayout();
+  if (recording_)
+    StopRecording();
+  if (initialized_)
+    Terminate();
+}
+
+// AudioDevice::RenderCallback implementation. Pulls 10 ms blocks of 16-bit
+// PCM from the registered webrtc::AudioTransport until |number_of_frames|
+// have been produced, then deinterleaves/converts them into the float
+// channel buffers expected by the audio output layer.
+void WebRtcAudioDeviceImpl::Render(
+    const std::vector<float*>& audio_data,
+    size_t number_of_frames,
+    size_t audio_delay_milliseconds) {
+  DCHECK_LE(number_of_frames, kMaxBufferSize);
+
+  // Cache the reported audio delay so PlayoutDelay() can report it.
+  output_delay_ms_ = audio_delay_milliseconds;
+
+  const int channels = audio_data.size();
+  DCHECK_LE(channels, kMaxChannels);
+
+  // This is the render (output) path, so the output sample rate must be
+  // used here; using |input_sample_rate_| would request wrongly sized
+  // blocks whenever the two rates differ.
+  const int samples_per_sec = static_cast<int>(output_sample_rate_);
+  uint32_t samples_per_10_msec = (samples_per_sec / 100);
+  const int bytes_per_10_msec =
+      channels * samples_per_10_msec * bytes_per_sample_;
+
+  uint32_t num_audio_samples = 0;
+  size_t accumulated_audio_samples = 0;
+
+  char* audio_byte_buffer = reinterpret_cast<char*>(output_buffer_.get());
+
+  // Get audio samples in blocks of 10 milliseconds from the registered
+  // webrtc::AudioTransport source. Keep reading until our internal buffer
+  // is full.
+  while (accumulated_audio_samples < number_of_frames) {
+    // Get 10ms and append output to temporary byte buffer.
+    audio_transport_callback_->NeedMorePlayData(samples_per_10_msec,
+                                                bytes_per_sample_,
+                                                channels,
+                                                samples_per_sec,
+                                                audio_byte_buffer,
+                                                num_audio_samples);
+    if (num_audio_samples == 0) {
+      // Avoid spinning forever if the source delivers no data.
+      break;
+    }
+    accumulated_audio_samples += num_audio_samples;
+    audio_byte_buffer += bytes_per_10_msec;
+  }
+
+  // Deinterleave each channel and convert to 32-bit floating-point
+  // with nominal range -1.0 -> +1.0 to match the callback format.
+  for (int channel_index = 0; channel_index < channels; ++channel_index) {
+    media::DeinterleaveAudioChannel(
+        output_buffer_.get(),
+        audio_data[channel_index],
+        channels,
+        channel_index,
+        bytes_per_sample_,
+        number_of_frames);
+  }
+}
+
+// AudioInputDevice::CaptureCallback implementation. Interleaves the float
+// channel buffers into 16-bit PCM and delivers them in 10 ms blocks to the
+// registered webrtc::AudioTransport sink.
+void WebRtcAudioDeviceImpl::Capture(
+    const std::vector<float*>& audio_data,
+    size_t number_of_frames,
+    size_t audio_delay_milliseconds) {
+  DCHECK_LE(number_of_frames, kMaxBufferSize);
+
+  // Cache the reported audio delay so RecordingDelay() can report it.
+  input_delay_ms_ = audio_delay_milliseconds;
+
+  const int channels = audio_data.size();
+  DCHECK_LE(channels, kMaxChannels);
+  uint32_t new_mic_level = 0;
+
+  // Interleave, scale, and clip input to int16 and store result in
+  // a local byte buffer.
+  media::InterleaveFloatToInt16(audio_data,
+                                input_buffer_.get(),
+                                number_of_frames);
+
+  // This is the capture (input) path, so the input sample rate must be
+  // used here; using |output_sample_rate_| would mislabel the recorded PCM
+  // whenever the two rates differ.
+  const int samples_per_sec = static_cast<int>(input_sample_rate_);
+  const int samples_per_10_msec = (samples_per_sec / 100);
+  const int bytes_per_10_msec =
+      channels * samples_per_10_msec * bytes_per_sample_;
+  size_t accumulated_audio_samples = 0;
+
+  char* audio_byte_buffer = reinterpret_cast<char*>(input_buffer_.get());
+
+  // Write audio samples in blocks of 10 milliseconds to the registered
+  // webrtc::AudioTransport sink. Keep writing until our internal byte
+  // buffer is empty.
+  while (accumulated_audio_samples < number_of_frames) {
+    // Deliver 10ms of recorded PCM audio.
+    // TODO(henrika): add support for analog AGC?
+    audio_transport_callback_->RecordedDataIsAvailable(
+        audio_byte_buffer,
+        samples_per_10_msec,
+        bytes_per_sample_,
+        channels,
+        samples_per_sec,
+        input_delay_ms_ + output_delay_ms_,
+        0,  // clock_drift
+        0,  // current_mic_level
+        new_mic_level);  // not used
+    accumulated_audio_samples += samples_per_10_msec;
+    audio_byte_buffer += bytes_per_10_msec;
+  }
+}
+
+// webrtc::Module implementation. Appends the module version string at
+// |position| in |version| and updates the book-keeping references.
+// Returns 0 on success, -1 if |version| is NULL or the buffer is too small.
+int32_t WebRtcAudioDeviceImpl::Version(char* version,
+                                       uint32_t& remaining_buffer_in_bytes,
+                                       uint32_t& position) const {
+  VLOG(1) << "Version()";
+  DCHECK(version);
+  if (version == NULL)
+    return -1;
+  size_t arr_size = arraysize(kVersion);
+  if (remaining_buffer_in_bytes < arr_size) {
+    DLOG(WARNING) << "version string requires " << arr_size << " bytes";
+    return -1;
+  }
+  // |arr_size| already includes the trailing NUL, and the capacity check
+  // above guarantees it fits. Pass the full size: strlcpy copies at most
+  // size-1 characters plus the terminator, so passing |arr_size - 1| would
+  // silently drop the last character of |kVersion|.
+  base::strlcpy(&version[position], kVersion, arr_size);
+  remaining_buffer_in_bytes -= arr_size;
+  position += arr_size;
+  VLOG(1) << "version: " << version;
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::ChangeUniqueId(const int32_t id) {
+  // Part of the webrtc::Module interface; not supported here.
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::TimeUntilNextProcess() {
+  // Calculate the number of milliseconds until this module wants its
+  // Process method to be called.
+  base::TimeDelta delta_time = (base::TimeTicks::Now() - last_process_time_);
+  int64 time_until_next =
+      kMillisecondsBetweenProcessCalls - delta_time.InMilliseconds();
+  return static_cast<int32_t>(time_until_next);
+}
+
+int32_t WebRtcAudioDeviceImpl::Process() {
+  // TODO(henrika): it is possible to add functionality in this method, which
+  // is called periodically. The idea is that we should call one of the methods
+  // in the registered AudioDeviceObserver to inform the user about warnings
+  // or error states. Leave it empty for now.
+  last_process_time_ = base::TimeTicks::Now();
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::ActiveAudioLayer(AudioLayer* audio_layer) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+webrtc::AudioDeviceModule::ErrorCode WebRtcAudioDeviceImpl::LastError() const {
+  // Returns the cached error code; it is initialized to kAdmErrNone and
+  // never updated elsewhere in this file.
+  return last_error_;
+}
+
+int32_t WebRtcAudioDeviceImpl::RegisterEventObserver(
+    webrtc::AudioDeviceObserver* event_callback) {
+  VLOG(1) << "RegisterEventObserver()";
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::RegisterAudioCallback(
+    webrtc::AudioTransport* audio_callback) {
+  VLOG(1) << "RegisterAudioCallback()";
+  // The transport may only be changed while no stream is active; passing
+  // NULL de-registers the current transport.
+  if (playing_ || recording_) {
+    LOG(ERROR) << "Unable to (de)register transport during active media";
+    return -1;
+  }
+  audio_transport_callback_ = audio_callback;
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::Init() {
+  VLOG(1) << "Init()";
+  // Idempotent: a second call is a no-op. Only a flag is toggled; the
+  // device clients are created in the constructor.
+  if (initialized_)
+    return 0;
+  initialized_ = true;
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::Terminate() {
+  VLOG(1) << "Terminate()";
+  // Idempotent: calling Terminate() when not initialized is a no-op.
+  if (!initialized_)
+    return 0;
+  initialized_ = false;
+  return 0;
+}
+
+bool WebRtcAudioDeviceImpl::Initialized() const {
+  return initialized_;
+}
+
+// Device enumeration and selection are not handled by this module
+// (presumably the browser process owns device selection - TODO(review):
+// confirm); hence the NOTIMPLEMENTED stubs below. Note that the Set*Device
+// variants deliberately return 0 so that webrtc clients treat them as
+// harmless no-ops.
+int16_t WebRtcAudioDeviceImpl::PlayoutDevices() {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int16_t WebRtcAudioDeviceImpl::RecordingDevices() {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::PlayoutDeviceName(
+    uint16_t index,
+    char name[webrtc::kAdmMaxDeviceNameSize],
+    char guid[webrtc::kAdmMaxGuidSize]) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::RecordingDeviceName(
+    uint16_t index,
+    char name[webrtc::kAdmMaxDeviceNameSize],
+    char guid[webrtc::kAdmMaxGuidSize]) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetPlayoutDevice(uint16_t index) {
+  VLOG(1) << "SetPlayoutDevice(index=" << index << ")";
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetPlayoutDevice(WindowsDeviceType device) {
+  VLOG(1) << "SetPlayoutDevice(device=" << device << ")";
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetRecordingDevice(uint16_t index) {
+  VLOG(1) << "SetRecordingDevice(index=" << index << ")";
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetRecordingDevice(WindowsDeviceType device) {
+  VLOG(1) << "SetRecordingDevice(device=" << device << ")";
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+// Availability and "initialized" state both simply reflect whether the
+// corresponding device client was successfully created in the constructor.
+int32_t WebRtcAudioDeviceImpl::PlayoutIsAvailable(bool* available) {
+  VLOG(1) << "PlayoutIsAvailable()";
+  *available = (audio_output_device_ != NULL);
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::InitPlayout() {
+  VLOG(1) << "InitPlayout()";
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+bool WebRtcAudioDeviceImpl::PlayoutIsInitialized() const {
+  VLOG(1) << "PlayoutIsInitialized()";
+  return (audio_output_device_ != NULL);
+}
+
+int32_t WebRtcAudioDeviceImpl::RecordingIsAvailable(bool* available) {
+  VLOG(1) << "RecordingIsAvailable()";
+  *available = (audio_input_device_ != NULL);
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::InitRecording() {
+  VLOG(1) << "InitRecording()";
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+bool WebRtcAudioDeviceImpl::RecordingIsInitialized() const {
+  VLOG(1) << "RecordingIsInitialized()";
+  return (audio_input_device_ != NULL);
+}
+
+int32_t WebRtcAudioDeviceImpl::StartPlayout() {
+  VLOG(1) << "StartPlayout()";
+  // An audio transport must be registered (RegisterAudioCallback()) before
+  // rendering can start, since Render() pulls data through it.
+  if (!audio_transport_callback_) {
+    LOG(ERROR) << "Audio transport is missing";
+    return -1;
+  }
+  if (playing_) {
+    // webrtc::VoiceEngine assumes that it is OK to call Start() twice and
+    // that the call is ignored the second time.
+    LOG(WARNING) << "Playout is already active";
+    return 0;
+  }
+  playing_ = audio_output_device_->Start();
+  return (playing_ ? 0 : -1);
+}
+
+int32_t WebRtcAudioDeviceImpl::StopPlayout() {
+  VLOG(1) << "StopPlayout()";
+  DCHECK(audio_output_device_);
+  if (!playing_) {
+    // webrtc::VoiceEngine assumes that it is OK to call Stop() just in case.
+    LOG(WARNING) << "Playout was already stopped";
+    return 0;
+  }
+  // Stop() returns true on success, in which case |playing_| becomes false.
+  playing_ = !audio_output_device_->Stop();
+  return (!playing_ ? 0 : -1);
+}
+
+bool WebRtcAudioDeviceImpl::Playing() const {
+  return playing_;
+}
+
+// Starts audio capturing. Requires a registered webrtc::AudioTransport
+// (see RegisterAudioCallback()). Returns 0 on success, -1 on failure.
+int32_t WebRtcAudioDeviceImpl::StartRecording() {
+  VLOG(1) << "StartRecording()";
+  // Note: the original code logged this condition twice (a LOG_IF(ERROR)
+  // immediately followed by the identical if/LOG(ERROR)); one check is
+  // enough.
+  if (!audio_transport_callback_) {
+    LOG(ERROR) << "Audio transport is missing";
+    return -1;
+  }
+  if (recording_) {
+    // webrtc::VoiceEngine assumes that it is OK to call Start() twice and
+    // that the call is ignored the second time.
+    LOG(WARNING) << "Recording is already active";
+    return 0;
+  }
+  recording_ = audio_input_device_->Start();
+  return (recording_ ? 0 : -1);
+}
+
+int32_t WebRtcAudioDeviceImpl::StopRecording() {
+  VLOG(1) << "StopRecording()";
+  DCHECK(audio_input_device_);
+  if (!recording_) {
+    // webrtc::VoiceEngine assumes that it is OK to call Stop() just in case.
+    LOG(WARNING) << "Recording was already stopped";
+    return 0;
+  }
+  // Stop() returns true on success, in which case |recording_| becomes false.
+  recording_ = !audio_input_device_->Stop();
+  return (!recording_ ? 0 : -1);
+}
+
+bool WebRtcAudioDeviceImpl::Recording() const {
+  return recording_;
+}
+
+// AGC and wave-out volume are not supported. The speaker/microphone
+// queries below optimistically report availability/initialization since
+// the actual devices are managed outside this module - TODO(review):
+// confirm that unconditionally reporting true is the intended behavior.
+int32_t WebRtcAudioDeviceImpl::SetAGC(bool enable) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+bool WebRtcAudioDeviceImpl::AGC() const {
+  NOTIMPLEMENTED();
+  return false;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetWaveOutVolume(uint16_t volume_left,
+                                                uint16_t volume_right) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+int32_t WebRtcAudioDeviceImpl::WaveOutVolume(
+    uint16_t* volume_left,
+    uint16_t* volume_right) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SpeakerIsAvailable(bool* available) {
+  VLOG(1) << "SpeakerIsAvailable()";
+  NOTIMPLEMENTED();
+  *available = true;
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::InitSpeaker() {
+  VLOG(1) << "InitSpeaker()";
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+bool WebRtcAudioDeviceImpl::SpeakerIsInitialized() const {
+  NOTIMPLEMENTED();
+  return true;
+}
+
+int32_t WebRtcAudioDeviceImpl::MicrophoneIsAvailable(bool* available) {
+  VLOG(1) << "MicrophoneIsAvailable()";
+  NOTIMPLEMENTED();
+  *available = true;
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::InitMicrophone() {
+  VLOG(1) << "InitMicrophone()";
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+bool WebRtcAudioDeviceImpl::MicrophoneIsInitialized() const {
+  NOTIMPLEMENTED();
+  return true;
+}
+
+// Volume, mute, and boost controls are not supported; every method below
+// returns -1 (the AudioDeviceModule error convention) without touching
+// its out-parameters.
+int32_t WebRtcAudioDeviceImpl::SpeakerVolumeIsAvailable(
+    bool* available) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetSpeakerVolume(uint32_t volume) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SpeakerVolume(uint32_t* volume) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MaxSpeakerVolume(uint32_t* max_volume) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MinSpeakerVolume(
+    uint32_t* min_volume) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SpeakerVolumeStepSize(
+    uint16_t* step_size) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MicrophoneVolumeIsAvailable(bool* available) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetMicrophoneVolume(uint32_t volume) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MicrophoneVolume(uint32_t* volume) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MaxMicrophoneVolume(
+    uint32_t* max_volume) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MinMicrophoneVolume(
+    uint32_t* min_volume) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MicrophoneVolumeStepSize(
+    uint16_t* step_size) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SpeakerMuteIsAvailable(bool* available) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetSpeakerMute(bool enable) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SpeakerMute(bool* enabled) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MicrophoneMuteIsAvailable(
+    bool* available) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetMicrophoneMute(bool enable) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MicrophoneMute(bool* enabled) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MicrophoneBoostIsAvailable(bool* available) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetMicrophoneBoost(bool enable) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::MicrophoneBoost(bool* enabled) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+// Stereo configuration is not supported; playout availability explicitly
+// reports mono-only (false).
+int32_t WebRtcAudioDeviceImpl::StereoPlayoutIsAvailable(bool* available) const {
+  VLOG(1) << "StereoPlayoutIsAvailable()";
+  NOTIMPLEMENTED();
+  *available = false;
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetStereoPlayout(bool enable) {
+  VLOG(1) << "SetStereoPlayout(enable=" << enable << ")";
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::StereoPlayout(bool* enabled) const {
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+// Reports whether stereo recording is available. Mirrors
+// StereoPlayoutIsAvailable(): only mono is supported.
+int32_t WebRtcAudioDeviceImpl::StereoRecordingIsAvailable(
+    bool* available) const {
+  VLOG(1) << "StereoRecordingIsAvailable()";
+  NOTIMPLEMENTED();
+  // Set the out-parameter explicitly; the original returned success while
+  // leaving |*available| uninitialized, so callers read garbage.
+  *available = false;
+  return 0;
+}
+
+// Stereo recording and per-channel recording selection are not supported.
+int32_t WebRtcAudioDeviceImpl::SetStereoRecording(bool enable) {
+  VLOG(1) << "SetStereoRecording(enable=" << enable << ")";
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::StereoRecording(bool* enabled) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetRecordingChannel(const ChannelType channel) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::RecordingChannel(ChannelType* channel) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+// Playout buffer configuration is fixed at construction time and cannot be
+// changed or queried through these methods.
+int32_t WebRtcAudioDeviceImpl::SetPlayoutBuffer(const BufferType type,
+                                                uint16_t size_ms) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::PlayoutBuffer(BufferType* type,
+                                             uint16_t* size_ms) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::PlayoutDelay(uint16_t* delay_ms) const {
+  // Report the cached output delay value (updated by Render()).
+  *delay_ms = static_cast<uint16_t>(output_delay_ms_);
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::RecordingDelay(uint16_t* delay_ms) const {
+  // Report the cached *input* delay value (updated by Capture()); the
+  // original comment incorrectly said "output delay".
+  *delay_ms = static_cast<uint16_t>(input_delay_ms_);
+  return 0;
+}
+
+// CPU-load reporting and raw PCM file dumps are not supported. Sample
+// rates are fixed at construction: the Set*SampleRate() methods fail, and
+// the getters simply echo the values given to the constructor.
+int32_t WebRtcAudioDeviceImpl::CPULoad(uint16_t* load) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::StartRawOutputFileRecording(
+    const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::StopRawOutputFileRecording() {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::StartRawInputFileRecording(
+    const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::StopRawInputFileRecording() {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetRecordingSampleRate(
+    const uint32_t samples_per_sec) {
+  // Sample rate should only be set at construction.
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::RecordingSampleRate(
+    uint32_t* samples_per_sec) const {
+  // Returns the sample rate set at construction.
+  *samples_per_sec = static_cast<uint32_t>(input_sample_rate_);
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetPlayoutSampleRate(
+    const uint32_t samples_per_sec) {
+  // Sample rate should only be set at construction.
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::PlayoutSampleRate(
+    uint32_t* samples_per_sec) const {
+  // Returns the sample rate set at construction.
+  *samples_per_sec = static_cast<uint32_t>(output_sample_rate_);
+  return 0;
+}
+
+int32_t WebRtcAudioDeviceImpl::ResetAudioDevice() {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::SetLoudspeakerStatus(bool enable) {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+int32_t WebRtcAudioDeviceImpl::GetLoudspeakerStatus(bool* enabled) const {
+  NOTIMPLEMENTED();
+  return -1;
+}
+
+// Returns true if |buffer_size| holds a whole number of 10 ms blocks at
+// |sample_rate| and does not exceed the pre-allocated maximum.
+bool WebRtcAudioDeviceImpl::BufferSizeIsValid(
+    size_t buffer_size, float sample_rate) const {
+  const int samples_per_sec = static_cast<int>(sample_rate);
+  const size_t samples_per_10_msec = (samples_per_sec / 100);
+  if (samples_per_10_msec == 0) {
+    // Guard against a modulo-by-zero below for nonsensical sample rates
+    // (anything below 100 Hz truncates to zero samples per 10 ms).
+    DLOG(WARNING) << "Invalid sample rate: " << sample_rate;
+    return false;
+  }
+  bool size_is_valid = (((buffer_size % samples_per_10_msec) == 0) &&
+                        (buffer_size <= kMaxBufferSize));
+  DLOG_IF(WARNING, !size_is_valid) << "Size of buffer must be an even "
+                                   << "multiple of 10 ms and no larger than "
+                                   << kMaxBufferSize;
+  return size_is_valid;
+}
diff --git a/content/renderer/media/webrtc_audio_device_impl.h b/content/renderer/media/webrtc_audio_device_impl.h
new file mode 100644
index 0000000..52536a2
--- /dev/null
+++ b/content/renderer/media/webrtc_audio_device_impl.h
@@ -0,0 +1,281 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_
+#pragma once
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/time.h"
+#include "content/renderer/media/audio_device.h"
+#include "content/renderer/media/audio_input_device.h"
+#include "third_party/webrtc/modules/audio_device/main/interface/audio_device.h"
+
+// A WebRtcAudioDeviceImpl instance implements the abstract interface
+// webrtc::AudioDeviceModule which makes it possible for a user (e.g. webrtc::
+// VoiceEngine) to register this class as an external AudioDeviceModule.
+// The user can then call WebRtcAudioDeviceImpl::StartPlayout() and
+// WebRtcAudioDeviceImpl::StartRecording() from the render process
+// to initiate and start audio rendering and capturing in the browser process.
+// IPC is utilized to set up the media streams.
+//
+// Usage example (30ms packet size, PCM mono samples at 48kHz sample rate):
+//
+// using namespace webrtc;
+//
+// scoped_ptr<WebRtcAudioDeviceImpl> external_adm;
+// external_adm.reset(
+// new WebRtcAudioDeviceImpl(1440, 1440, 1, 1, 48000, 48000));
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// base->RegisterAudioDeviceModule(*external_adm);
+// base->Init();
+// int ch = base->CreateChannel();
+// ...
+// base->StartReceive(ch)
+// base->StartPlayout(ch);
+// base->StartSending(ch);
+// ...
+// <== full-duplex audio session ==>
+// ...
+// base->DeleteChannel(ch);
+// base->Terminate();
+// base->Release();
+// VoiceEngine::Delete(voe);
+//
+// Note that WebRtcAudioDeviceImpl::RegisterAudioCallback() will
+// be called by the webrtc::VoiceEngine::Init() call and the
+// webrtc::VoiceEngine is a webrtc::AudioTransport implementation.
+// Hence, when the underlying audio layer wants data samples to be played out,
+// the AudioDevice::RenderCallback() will be called, which in turn uses the
+// registered webrtc::AudioTransport callback and feeds the data to the
+// webrtc::VoiceEngine.
+//
+// The picture below illustrates the media flow on the capture side:
+//
+// .------------------. .----------------------.
+// (Native audio) => | AudioInputStream |-> OnData ->| AudioInputController |-.
+// .------------------. .----------------------. |
+// |
+// browser process |
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - (*)
+// renderer process |
+// |
+// .-------------------------------. .------------------. |
+// .---| WebRtcAudioDeviceImpl |<- Capture <-| AudioInputDevice | <--.
+// | .-------------------------------. .------------------.
+// |
+// | .---------------------.
+// .-> RecordedDataIsAvailable ->| webrtc::VoiceEngine | => (encode+transmit)
+// .---------------------.
+//
+// (*) Using SyncSocket for inter-process synchronization with low latency.
+// The actual data is transferred via SharedMemory. IPC is not involved
+// in the actual media transfer.
+//
+// This class must be created on the main render thread since it creates
+// AudioDevice and AudioInputDevice objects and they both require a valid
+// RenderThread::current() pointer.
+//
+class WebRtcAudioDeviceImpl
+    : public webrtc::AudioDeviceModule,
+      public AudioDevice::RenderCallback,
+      public AudioInputDevice::CaptureCallback {
+ public:
+  // Buffer sizes are in audio frames and must be even multiples of 10 ms
+  // worth of frames at the corresponding sample rate (see
+  // BufferSizeIsValid()); channel counts are capped at stereo.
+  WebRtcAudioDeviceImpl(size_t input_buffer_size,
+                        size_t output_buffer_size,
+                        int input_channels,
+                        int output_channels,
+                        double input_sample_rate,
+                        double output_sample_rate);
+  virtual ~WebRtcAudioDeviceImpl();
+
+  // AudioDevice::RenderCallback implementation.
+  // Pulls 10 ms PCM blocks from the registered webrtc::AudioTransport and
+  // converts them to the float format expected by the output layer.
+  virtual void Render(const std::vector<float*>& audio_data,
+                      size_t number_of_frames,
+                      size_t audio_delay_milliseconds);
+
+  // AudioInputDevice::CaptureCallback implementation.
+  // Converts captured float audio to PCM and delivers it in 10 ms blocks to
+  // the registered webrtc::AudioTransport.
+  virtual void Capture(const std::vector<float*>& audio_data,
+                       size_t number_of_frames,
+                       size_t audio_delay_milliseconds);
+
+  // webrtc::Module implementation.
+  virtual int32_t Version(char* version,
+                          uint32_t& remaining_buffer_in_bytes,
+                          uint32_t& position) const;
+  virtual int32_t ChangeUniqueId(const int32_t id);
+  virtual int32_t TimeUntilNextProcess();
+  virtual int32_t Process();
+
+  // webrtc::AudioDeviceModule implementation.
+  // Unless noted otherwise, methods follow the AudioDeviceModule
+  // convention of returning 0 on success and -1 on failure; many are
+  // NOTIMPLEMENTED stubs in this Chrome-side implementation.
+  virtual int32_t ActiveAudioLayer(AudioLayer* audio_layer) const;
+  virtual ErrorCode LastError() const;
+
+  virtual int32_t RegisterEventObserver(
+      webrtc::AudioDeviceObserver* event_callback);
+  virtual int32_t RegisterAudioCallback(webrtc::AudioTransport* audio_callback);
+
+  virtual int32_t Init();
+  virtual int32_t Terminate();
+  virtual bool Initialized() const;
+
+  virtual int16_t PlayoutDevices();
+  virtual int16_t RecordingDevices();
+  virtual int32_t PlayoutDeviceName(uint16_t index,
+                                    char name[webrtc::kAdmMaxDeviceNameSize],
+                                    char guid[webrtc::kAdmMaxGuidSize]);
+  virtual int32_t RecordingDeviceName(uint16_t index,
+                                      char name[webrtc::kAdmMaxDeviceNameSize],
+                                      char guid[webrtc::kAdmMaxGuidSize]);
+
+  virtual int32_t SetPlayoutDevice(uint16_t index);
+  virtual int32_t SetPlayoutDevice(WindowsDeviceType device);
+  virtual int32_t SetRecordingDevice(uint16_t index);
+  virtual int32_t SetRecordingDevice(WindowsDeviceType device);
+
+  virtual int32_t PlayoutIsAvailable(bool* available);
+  virtual int32_t InitPlayout();
+  virtual bool PlayoutIsInitialized() const;
+  virtual int32_t RecordingIsAvailable(bool* available);
+  virtual int32_t InitRecording();
+  virtual bool RecordingIsInitialized() const;
+
+  virtual int32_t StartPlayout();
+  virtual int32_t StopPlayout();
+  virtual bool Playing() const;
+  virtual int32_t StartRecording();
+  virtual int32_t StopRecording();
+  virtual bool Recording() const;
+
+  virtual int32_t SetAGC(bool enable);
+  virtual bool AGC() const;
+
+  virtual int32_t SetWaveOutVolume(uint16_t volume_left,
+                                   uint16_t volume_right);
+  virtual int32_t WaveOutVolume(uint16_t* volume_left,
+                                uint16_t* volume_right) const;
+
+  virtual int32_t SpeakerIsAvailable(bool* available);
+  virtual int32_t InitSpeaker();
+  virtual bool SpeakerIsInitialized() const;
+  virtual int32_t MicrophoneIsAvailable(bool* available);
+  virtual int32_t InitMicrophone();
+  virtual bool MicrophoneIsInitialized() const;
+
+  virtual int32_t SpeakerVolumeIsAvailable(bool* available);
+  virtual int32_t SetSpeakerVolume(uint32_t volume);
+  virtual int32_t SpeakerVolume(uint32_t* volume) const;
+  virtual int32_t MaxSpeakerVolume(uint32_t* max_volume) const;
+  virtual int32_t MinSpeakerVolume(uint32_t* min_volume) const;
+  virtual int32_t SpeakerVolumeStepSize(uint16_t* step_size) const;
+
+  virtual int32_t MicrophoneVolumeIsAvailable(bool* available);
+  virtual int32_t SetMicrophoneVolume(uint32_t volume);
+  virtual int32_t MicrophoneVolume(uint32_t* volume) const;
+  virtual int32_t MaxMicrophoneVolume(uint32_t* max_volume) const;
+  virtual int32_t MinMicrophoneVolume(uint32_t* min_volume) const;
+  virtual int32_t MicrophoneVolumeStepSize(uint16_t* step_size) const;
+
+  virtual int32_t SpeakerMuteIsAvailable(bool* available);
+  virtual int32_t SetSpeakerMute(bool enable);
+  virtual int32_t SpeakerMute(bool* enabled) const;
+
+  virtual int32_t MicrophoneMuteIsAvailable(bool* available);
+  virtual int32_t SetMicrophoneMute(bool enable);
+  virtual int32_t MicrophoneMute(bool* enabled) const;
+
+  virtual int32_t MicrophoneBoostIsAvailable(bool* available);
+  virtual int32_t SetMicrophoneBoost(bool enable);
+  virtual int32_t MicrophoneBoost(bool* enabled) const;
+
+  virtual int32_t StereoPlayoutIsAvailable(bool* available) const;
+  virtual int32_t SetStereoPlayout(bool enable);
+  virtual int32_t StereoPlayout(bool* enabled) const;
+  virtual int32_t StereoRecordingIsAvailable(bool* available) const;
+  virtual int32_t SetStereoRecording(bool enable);
+  virtual int32_t StereoRecording(bool* enabled) const;
+  virtual int32_t SetRecordingChannel(const ChannelType channel);
+  virtual int32_t RecordingChannel(ChannelType* channel) const;
+
+  virtual int32_t SetPlayoutBuffer(const BufferType type, uint16_t size_ms);
+  virtual int32_t PlayoutBuffer(BufferType* type, uint16_t* size_ms) const;
+  virtual int32_t PlayoutDelay(uint16_t* delay_ms) const;
+  virtual int32_t RecordingDelay(uint16_t* delay_ms) const;
+
+  virtual int32_t CPULoad(uint16_t* load) const;
+
+  virtual int32_t StartRawOutputFileRecording(
+      const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]);
+  virtual int32_t StopRawOutputFileRecording();
+  virtual int32_t StartRawInputFileRecording(
+      const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]);
+  virtual int32_t StopRawInputFileRecording();
+
+  virtual int32_t SetRecordingSampleRate(const uint32_t samples_per_sec);
+  virtual int32_t RecordingSampleRate(uint32_t* samples_per_sec) const;
+  virtual int32_t SetPlayoutSampleRate(const uint32_t samples_per_sec);
+  virtual int32_t PlayoutSampleRate(uint32_t* samples_per_sec) const;
+
+  virtual int32_t ResetAudioDevice();
+  virtual int32_t SetLoudspeakerStatus(bool enable);
+  virtual int32_t GetLoudspeakerStatus(bool* enabled) const;
+
+  // Helpers.
+  // Returns true if |buffer_size| holds a whole number of 10 ms blocks at
+  // |sample_rate| and does not exceed the internal maximum.
+  bool BufferSizeIsValid(size_t buffer_size, float sample_rate) const;
+
+  // Accessors.
+  size_t input_buffer_size() const { return input_buffer_size_; }
+  size_t output_buffer_size() const { return output_buffer_size_; }
+  int input_channels() const { return input_channels_; }
+  int output_channels() const { return output_channels_; }
+
+ private:
+  // Provides access to the native audio input layer in the browser process.
+  scoped_refptr<AudioInputDevice> audio_input_device_;
+
+  // Provides access to the native audio output layer in the browser process.
+  scoped_refptr<AudioDevice> audio_output_device_;
+
+  // Weak reference to the audio callback.
+  // The webrtc client defines |audio_transport_callback_| by calling
+  // RegisterAudioCallback().
+  webrtc::AudioTransport* audio_transport_callback_;
+
+  webrtc::AudioDeviceModule::ErrorCode last_error_;
+
+  // Buffer geometry and sample rates; all fixed at construction.
+  size_t input_buffer_size_;
+  size_t output_buffer_size_;
+  int input_channels_;
+  int output_channels_;
+  double input_sample_rate_;
+  double output_sample_rate_;
+
+  // Size in bytes of a single (16-bit) PCM sample.
+  int bytes_per_sample_;
+
+  // State flags toggled by Init()/Terminate() and Start/Stop of each stream.
+  bool initialized_;
+  bool playing_;
+  bool recording_;
+
+  // Cached value of the current audio delay on the input/capture side.
+  int input_delay_ms_;
+
+  // Cached value of the current audio delay on the output/renderer side.
+  int output_delay_ms_;
+
+  base::TimeTicks last_process_time_;
+
+  // Buffers used for temporary storage during capture/render callbacks.
+  // Allocated during construction to save stack.
+  scoped_array<int16> input_buffer_;
+  scoped_array<int16> output_buffer_;
+
+  DISALLOW_COPY_AND_ASSIGN(WebRtcAudioDeviceImpl);
+};
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_