author     ajm@chromium.org <ajm@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-12-13 08:39:34 +0000
committer  ajm@chromium.org <ajm@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-12-13 08:39:34 +0000
commit     0eddc22125d3d2eef9690ba87e3bad4899b4d17c (patch)
tree       f61d5f6fee8fc4529ba9d327af091689ddc377af /content
parent     bc92bc970efc1620e162efabedfee8592afcac06 (diff)
Enable platform echo cancellation through the AudioRecord path.

Add a platform effects mask member to AudioParameters. This allows the
availability of platform effects (currently AEC) to be plumbed up to
MediaStreamDependencyFactory, where they can be reconciled with the media
constraints to determine whether the effects should be enabled. When this is
the case, the constraints are modified to disable the corresponding software
effect in PeerConnection.

The availability is controlled by a whitelist of device models in
AudioManagerAndroid, which, for AEC, currently consists of the Nexus 5 and
Nexus 7. AudioManagerAndroid uses the AudioRecord path iff the platform AEC
is enabled.

TESTED=
Using apprtc on a N5 and N7 (whitelisted):
- The AudioRecord input path is used.
- The platform AEC is enabled and the software AEC (in PeerConnection) is
  disabled.
- Calls have good echo cancellation quality.

Using apprtc with ?audio=googEchoCancellation=false on a N5 and N7:
- The OpenSLES input path is used.
- Both the platform and software AEC are disabled.

Using apprtc on a Nexus 4 (non-whitelisted):
- The OpenSLES input path is used.
- The platform AEC is disabled and the software AEC is enabled.

Using apprtc on a Galaxy S2 (running ICS):
- The OpenSLES input path is used.

audio_android_unittest.cc passes on the N5, N7 and Galaxy S2.

TBR=jschuh
Review URL: https://codereview.chromium.org/99033003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@240548 0039d316-1c4b-4281-b951-d872f2087c98
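
The constraint/effect reconciliation is the heart of the change in
media_stream_dependency_factory.cc below. The following standalone C++ sketch
illustrates the same decision logic under simplified assumptions: Constraints,
ReconcileEffects and the effect bitmask here are stand-in placeholders, not the
real Chromium/WebRTC classes, and only the googEchoCancellation constraint is
modeled.

#include <cstdio>
#include <map>
#include <string>

enum PlatformEffects { NO_EFFECTS = 0, ECHO_CANCELLER = 1 << 0 };

// Stand-in for RTCMediaConstraints / webrtc::FindConstraint.
struct Constraints {
  std::map<std::string, bool> mandatory;
  bool Find(const std::string& key, bool* value) const {
    std::map<std::string, bool>::const_iterator it = mandatory.find(key);
    if (it == mandatory.end())
      return false;
    *value = it->second;
    return true;
  }
};

// Returns the platform effects that should remain enabled. When a constraint
// is true and the matching platform effect is available, the constraint is
// flipped to false so the software effect is disabled instead.
int ReconcileEffects(int effects, Constraints* constraints) {
  static const struct { const char* constraint; int effect; } kMap[] = {
      {"googEchoCancellation", ECHO_CANCELLER},
  };
  for (size_t i = 0; i < sizeof(kMap) / sizeof(kMap[0]); ++i) {
    bool value = false;
    if (!constraints->Find(kMap[i].constraint, &value) || !value) {
      // Constraint absent or false: make sure the platform effect is off.
      effects &= ~kMap[i].effect;
    } else if (effects & kMap[i].effect) {
      // Constraint true and platform effect available: keep the platform
      // effect and turn off the corresponding software effect.
      constraints->mandatory[kMap[i].constraint] = false;
    }
  }
  return effects;
}

int main() {
  Constraints constraints;
  constraints.mandatory["googEchoCancellation"] = true;
  int effects = ReconcileEffects(ECHO_CANCELLER, &constraints);
  std::printf("platform effects: %d, software AEC requested: %d\n",
              effects,
              static_cast<int>(constraints.mandatory["googEchoCancellation"]));
  return 0;
}

With the echo-cancellation constraint true and the platform AEC available, this
prints platform effects 1 and a software AEC request of 0, mirroring the
whitelisted N5/N7 behavior described above; with the constraint false it clears
the platform effect instead.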
Diffstat (limited to 'content')
-rw-r--r--  content/browser/renderer_host/media/audio_input_device_manager.cc    1
-rw-r--r--  content/browser/renderer_host/media/audio_renderer_host_unittest.cc   3
-rw-r--r--  content/common/media/media_param_traits.cc                           14
-rw-r--r--  content/common/media/media_stream_messages.h                          1
-rw-r--r--  content/public/common/media_stream_request.h                         10
-rw-r--r--  content/renderer/media/media_stream_dependency_factory.cc            45
-rw-r--r--  content/renderer/media/webrtc_audio_capturer.cc                      34
-rw-r--r--  content/renderer/media/webrtc_audio_capturer.h                        9
-rw-r--r--  content/renderer/media/webrtc_audio_capturer_unittest.cc              6
-rw-r--r--  content/renderer/media/webrtc_audio_device_unittest.cc                3
-rw-r--r--  content/renderer/media/webrtc_local_audio_track_unittest.cc          14
-rw-r--r--  content/renderer/renderer_webkitplatformsupport_impl.cc               3
12 files changed, 104 insertions, 39 deletions
diff --git a/content/browser/renderer_host/media/audio_input_device_manager.cc b/content/browser/renderer_host/media/audio_input_device_manager.cc
index 00cd435..cfe00ca 100644
--- a/content/browser/renderer_host/media/audio_input_device_manager.cc
+++ b/content/browser/renderer_host/media/audio_input_device_manager.cc
@@ -187,6 +187,7 @@ void AudioInputDeviceManager::OpenOnDeviceThread(
input_params.sample_rate = params.sample_rate();
input_params.channel_layout = params.channel_layout();
input_params.frames_per_buffer = params.frames_per_buffer();
+ input_params.effects = params.effects();
// Add preferred output device information if a matching output device
// exists.
diff --git a/content/browser/renderer_host/media/audio_renderer_host_unittest.cc b/content/browser/renderer_host/media/audio_renderer_host_unittest.cc
index 87f38a7..ff36a27 100644
--- a/content/browser/renderer_host/media/audio_renderer_host_unittest.cc
+++ b/content/browser/renderer_host/media/audio_renderer_host_unittest.cc
@@ -198,7 +198,8 @@ class AudioRendererHostTest : public testing::Test {
media::CHANNEL_LAYOUT_STEREO,
2,
media::AudioParameters::kAudioCDSampleRate, 16,
- media::AudioParameters::kAudioCDSampleRate / 10);
+ media::AudioParameters::kAudioCDSampleRate / 10,
+ media::AudioParameters::NO_EFFECTS);
} else {
session_id = 0;
params = media::AudioParameters(
diff --git a/content/common/media/media_param_traits.cc b/content/common/media/media_param_traits.cc
index f6eaec1..3ec6a25 100644
--- a/content/common/media/media_param_traits.cc
+++ b/content/common/media/media_param_traits.cc
@@ -25,13 +25,14 @@ void ParamTraits<AudioParameters>::Write(Message* m,
m->WriteInt(p.frames_per_buffer());
m->WriteInt(p.channels());
m->WriteInt(p.input_channels());
+ m->WriteInt(p.effects());
}
bool ParamTraits<AudioParameters>::Read(const Message* m,
PickleIterator* iter,
AudioParameters* r) {
int format, channel_layout, sample_rate, bits_per_sample,
- frames_per_buffer, channels, input_channels;
+ frames_per_buffer, channels, input_channels, effects;
if (!m->ReadInt(iter, &format) ||
!m->ReadInt(iter, &channel_layout) ||
@@ -39,11 +40,14 @@ bool ParamTraits<AudioParameters>::Read(const Message* m,
!m->ReadInt(iter, &bits_per_sample) ||
!m->ReadInt(iter, &frames_per_buffer) ||
!m->ReadInt(iter, &channels) ||
- !m->ReadInt(iter, &input_channels))
+ !m->ReadInt(iter, &input_channels) ||
+ !m->ReadInt(iter, &effects))
return false;
- r->Reset(static_cast<AudioParameters::Format>(format),
- static_cast<ChannelLayout>(channel_layout), channels,
- input_channels, sample_rate, bits_per_sample, frames_per_buffer);
+ AudioParameters params(static_cast<AudioParameters::Format>(format),
+ static_cast<ChannelLayout>(channel_layout), channels,
+ input_channels, sample_rate, bits_per_sample, frames_per_buffer,
+ effects);
+ *r = params;
if (!r->IsValid())
return false;
return true;
diff --git a/content/common/media/media_stream_messages.h b/content/common/media/media_stream_messages.h
index c6e44a2..4cedc3f 100644
--- a/content/common/media/media_stream_messages.h
+++ b/content/common/media/media_stream_messages.h
@@ -38,6 +38,7 @@ IPC_STRUCT_TRAITS_BEGIN(content::StreamDeviceInfo)
IPC_STRUCT_TRAITS_MEMBER(device.input.sample_rate)
IPC_STRUCT_TRAITS_MEMBER(device.input.channel_layout)
IPC_STRUCT_TRAITS_MEMBER(device.input.frames_per_buffer)
+ IPC_STRUCT_TRAITS_MEMBER(device.input.effects)
IPC_STRUCT_TRAITS_MEMBER(device.matched_output.sample_rate)
IPC_STRUCT_TRAITS_MEMBER(device.matched_output.channel_layout)
IPC_STRUCT_TRAITS_MEMBER(device.matched_output.frames_per_buffer)
diff --git a/content/public/common/media_stream_request.h b/content/public/common/media_stream_request.h
index e6870e1..a117931 100644
--- a/content/public/common/media_stream_request.h
+++ b/content/public/common/media_stream_request.h
@@ -106,14 +106,15 @@ struct CONTENT_EXPORT MediaStreamDevice {
// in media::AudioParameters.
struct AudioDeviceParameters {
AudioDeviceParameters()
- : sample_rate(), channel_layout(), frames_per_buffer() {
+ : sample_rate(), channel_layout(), frames_per_buffer(), effects() {
}
AudioDeviceParameters(int sample_rate, int channel_layout,
- int frames_per_buffer)
+ int frames_per_buffer)
: sample_rate(sample_rate),
channel_layout(channel_layout),
- frames_per_buffer(frames_per_buffer) {
+ frames_per_buffer(frames_per_buffer),
+ effects() {
}
// Preferred sample rate in samples per second for the device.
@@ -129,6 +130,9 @@ struct CONTENT_EXPORT MediaStreamDevice {
// expected browser side settings and avoid unnecessary buffering.
// See media::AudioParameters for more.
int frames_per_buffer;
+
+ // See media::AudioParameters::PlatformEffectsMask.
+ int effects;
};
// These below two member variables are valid only when the type of device is
diff --git a/content/renderer/media/media_stream_dependency_factory.cc b/content/renderer/media/media_stream_dependency_factory.cc
index 6172e06..8da7b14 100644
--- a/content/renderer/media/media_stream_dependency_factory.cc
+++ b/content/renderer/media/media_stream_dependency_factory.cc
@@ -78,6 +78,15 @@ struct {
webrtc::MediaConstraintsInterface::kValueTrue },
};
+// Map of corresponding media constraints and platform effects.
+struct {
+ const char* constraint;
+ const media::AudioParameters::PlatformEffectsMask effect;
+} const kConstraintEffectMap[] = {
+ { webrtc::MediaConstraintsInterface::kEchoCancellation,
+ media::AudioParameters::ECHO_CANCELLER},
+};
+
// Merge |constraints| with |kDefaultAudioConstraints|. For any key which exists
// in both, the value from |constraints| is maintained, including its
// mandatory/optional status. New values from |kDefaultAudioConstraints| will
@@ -323,7 +332,36 @@ void MediaStreamDependencyFactory::CreateNativeMediaSources(
// TODO(xians): Create a new capturer for difference microphones when we
// support multiple microphones. See issue crbug/262117 .
- const StreamDeviceInfo device_info = source_data->device_info();
+ StreamDeviceInfo device_info = source_data->device_info();
+ RTCMediaConstraints constraints = native_audio_constraints;
+
+ // If any platform effects are available, check them against the
+ // constraints. Disable effects to match false constraints, but if a
+ // constraint is true, set the constraint to false to later disable the
+ // software effect.
+ int effects = device_info.device.input.effects;
+ if (effects != media::AudioParameters::NO_EFFECTS) {
+ for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kConstraintEffectMap); ++i) {
+ bool value;
+ if (!webrtc::FindConstraint(&constraints,
+ kConstraintEffectMap[i].constraint, &value, NULL) || !value) {
+ // If the constraint is false, or does not exist, disable the platform
+ // effect.
+ effects &= ~kConstraintEffectMap[i].effect;
+ DVLOG(1) << "Disabling constraint: "
+ << kConstraintEffectMap[i].constraint;
+ } else if (effects & kConstraintEffectMap[i].effect) {
+ // If the constraint is true, leave the platform effect enabled, and
+ // set the constraint to false to later disable the software effect.
+ constraints.AddMandatory(kConstraintEffectMap[i].constraint,
+ webrtc::MediaConstraintsInterface::kValueFalse, true);
+ DVLOG(1) << "Disabling platform effect: "
+ << kConstraintEffectMap[i].constraint;
+ }
+ }
+ device_info.device.input.effects = effects;
+ }
+
scoped_refptr<WebRtcAudioCapturer> capturer(
MaybeCreateAudioCapturer(render_view_id, device_info));
if (!capturer.get()) {
@@ -341,7 +379,7 @@ void MediaStreamDependencyFactory::CreateNativeMediaSources(
// Creates a LocalAudioSource object which holds audio options.
// TODO(xians): The option should apply to the track instead of the source.
source_data->SetLocalAudioSource(
- CreateLocalAudioSource(&native_audio_constraints).get());
+ CreateLocalAudioSource(&constraints).get());
source_observer->AddSource(source_data->local_audio_source());
}
@@ -910,7 +948,8 @@ MediaStreamDependencyFactory::MaybeCreateAudioCapturer(
device_info.session_id,
device_info.device.id,
device_info.device.matched_output.sample_rate,
- device_info.device.matched_output.frames_per_buffer)) {
+ device_info.device.matched_output.frames_per_buffer,
+ device_info.device.input.effects)) {
return NULL;
}
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc
index 67b4628..391e7d7 100644
--- a/content/renderer/media/webrtc_audio_capturer.cc
+++ b/content/renderer/media/webrtc_audio_capturer.cc
@@ -114,7 +114,8 @@ scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() {
}
void WebRtcAudioCapturer::Reconfigure(int sample_rate,
- media::ChannelLayout channel_layout) {
+ media::ChannelLayout channel_layout,
+ int effects) {
DCHECK(thread_checker_.CalledOnValidThread());
int buffer_size = GetBufferSize(sample_rate);
DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size;
@@ -124,9 +125,8 @@ void WebRtcAudioCapturer::Reconfigure(int sample_rate,
// bits_per_sample is always 16 for now.
int bits_per_sample = 16;
- media::AudioParameters params(format, channel_layout, sample_rate,
- bits_per_sample, buffer_size);
-
+ media::AudioParameters params(format, channel_layout, 0, sample_rate,
+ bits_per_sample, buffer_size, effects);
{
base::AutoLock auto_lock(lock_);
params_ = params;
@@ -137,13 +137,14 @@ void WebRtcAudioCapturer::Reconfigure(int sample_rate,
}
bool WebRtcAudioCapturer::Initialize(int render_view_id,
- media::ChannelLayout channel_layout,
- int sample_rate,
- int buffer_size,
- int session_id,
- const std::string& device_id,
- int paired_output_sample_rate,
- int paired_output_frames_per_buffer) {
+ media::ChannelLayout channel_layout,
+ int sample_rate,
+ int buffer_size,
+ int session_id,
+ const std::string& device_id,
+ int paired_output_sample_rate,
+ int paired_output_frames_per_buffer,
+ int effects) {
DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
@@ -209,7 +210,8 @@ bool WebRtcAudioCapturer::Initialize(int render_view_id,
// providing an alternative media::AudioCapturerSource.
SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
channel_layout,
- static_cast<float>(sample_rate));
+ static_cast<float>(sample_rate),
+ effects);
return true;
}
@@ -282,7 +284,8 @@ void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
void WebRtcAudioCapturer::SetCapturerSource(
const scoped_refptr<media::AudioCapturerSource>& source,
media::ChannelLayout channel_layout,
- float sample_rate) {
+ float sample_rate,
+ int effects) {
DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
<< "sample_rate=" << sample_rate << ")";
@@ -308,7 +311,7 @@ void WebRtcAudioCapturer::SetCapturerSource(
// Dispatch the new parameters both to the sink(s) and to the new source.
// The idea is to get rid of any dependency of the microphone parameters
// which would normally be used by default.
- Reconfigure(sample_rate, channel_layout);
+ Reconfigure(sample_rate, channel_layout, effects);
// Make sure to grab the new parameters in case they were reconfigured.
media::AudioParameters params = audio_parameters();
@@ -347,7 +350,8 @@ void WebRtcAudioCapturer::EnablePeerConnectionMode() {
// WebRtc native buffer size.
SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
params.channel_layout(),
- static_cast<float>(params.sample_rate()));
+ static_cast<float>(params.sample_rate()),
+ params.effects());
}
void WebRtcAudioCapturer::Start() {
diff --git a/content/renderer/media/webrtc_audio_capturer.h b/content/renderer/media/webrtc_audio_capturer.h
index 37fbf0a..2339114 100644
--- a/content/renderer/media/webrtc_audio_capturer.h
+++ b/content/renderer/media/webrtc_audio_capturer.h
@@ -56,7 +56,8 @@ class CONTENT_EXPORT WebRtcAudioCapturer
int session_id,
const std::string& device_id,
int paired_output_sample_rate,
- int paired_output_frames_per_buffer);
+ int paired_output_frames_per_buffer,
+ int effects);
// Add a audio track to the sinks of the capturer.
// WebRtcAudioDeviceImpl calls this method on the main render thread but
@@ -79,7 +80,8 @@ class CONTENT_EXPORT WebRtcAudioCapturer
void SetCapturerSource(
const scoped_refptr<media::AudioCapturerSource>& source,
media::ChannelLayout channel_layout,
- float sample_rate);
+ float sample_rate,
+ int effects);
// Called when a stream is connecting to a peer connection. This will set
// up the native buffer size for the stream in order to optimize the
@@ -142,7 +144,8 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// Reconfigures the capturer with a new capture parameters.
// Must be called without holding the lock.
- void Reconfigure(int sample_rate, media::ChannelLayout channel_layout);
+ void Reconfigure(int sample_rate, media::ChannelLayout channel_layout,
+ int effects);
// Starts recording audio.
// Triggered by AddSink() on the main render thread or a Libjingle working
diff --git a/content/renderer/media/webrtc_audio_capturer_unittest.cc b/content/renderer/media/webrtc_audio_capturer_unittest.cc
index cc7d528..184ba01 100644
--- a/content/renderer/media/webrtc_audio_capturer_unittest.cc
+++ b/content/renderer/media/webrtc_audio_capturer_unittest.cc
@@ -96,12 +96,14 @@ class WebRtcAudioCapturerTest : public testing::Test {
#endif
capturer_ = WebRtcAudioCapturer::CreateCapturer();
capturer_->Initialize(-1, params_.channel_layout(), params_.sample_rate(),
- params_.frames_per_buffer(), 0, std::string(), 0, 0);
+ params_.frames_per_buffer(), 0, std::string(), 0, 0,
+ params_.effects());
capturer_source_ = new MockCapturerSource();
EXPECT_CALL(*capturer_source_.get(), Initialize(_, capturer_.get(), 0));
capturer_->SetCapturerSource(capturer_source_,
params_.channel_layout(),
- params_.sample_rate());
+ params_.sample_rate(),
+ params_.effects());
EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
EXPECT_CALL(*capturer_source_.get(), Start());
diff --git a/content/renderer/media/webrtc_audio_device_unittest.cc b/content/renderer/media/webrtc_audio_device_unittest.cc
index 73f0885..d6821f6f 100644
--- a/content/renderer/media/webrtc_audio_device_unittest.cc
+++ b/content/renderer/media/webrtc_audio_device_unittest.cc
@@ -120,7 +120,8 @@ bool CreateAndInitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
media::ChannelLayout channel_layout =
hardware_config->GetInputChannelLayout();
if (!capturer->Initialize(kRenderViewId, channel_layout, sample_rate, 0, 1,
- media::AudioManagerBase::kDefaultDeviceId, 0 ,0)) {
+ media::AudioManagerBase::kDefaultDeviceId, 0, 0,
+ media::AudioParameters::NO_EFFECTS)) {
return false;
}
diff --git a/content/renderer/media/webrtc_local_audio_track_unittest.cc b/content/renderer/media/webrtc_local_audio_track_unittest.cc
index f5b668a..a435776 100644
--- a/content/renderer/media/webrtc_local_audio_track_unittest.cc
+++ b/content/renderer/media/webrtc_local_audio_track_unittest.cc
@@ -168,7 +168,8 @@ class WebRtcLocalAudioTrackTest : public ::testing::Test {
.WillOnce(Return());
capturer_->SetCapturerSource(capturer_source_,
params_.channel_layout(),
- params_.sample_rate());
+ params_.sample_rate(),
+ params_.effects());
}
media::AudioParameters params_;
@@ -460,7 +461,8 @@ TEST_F(WebRtcLocalAudioTrackTest, SetNewSourceForCapturerAfterStartTrack) {
EXPECT_CALL(*new_source.get(), OnStart());
capturer_->SetCapturerSource(new_source,
params_.channel_layout(),
- params_.sample_rate());
+ params_.sample_rate(),
+ params_.effects());
// Stop the track.
EXPECT_CALL(*new_source.get(), OnStop());
@@ -504,7 +506,8 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
EXPECT_CALL(*new_source.get(), OnInitialize(_, new_capturer.get(), 0));
new_capturer->SetCapturerSource(new_source,
media::CHANNEL_LAYOUT_MONO,
- 44100);
+ 44100,
+ media::AudioParameters::NO_EFFECTS);
// Setup the second audio track, connect it to the new capturer and start it.
EXPECT_CALL(*new_source.get(), SetAutomaticGainControl(true));
@@ -560,11 +563,12 @@ TEST_F(WebRtcLocalAudioTrackTest, TrackWorkWithSmallBufferSize) {
scoped_refptr<MockCapturerSource> source(
new MockCapturerSource(capturer.get()));
capturer->Initialize(-1, params.channel_layout(), params.sample_rate(),
- params.frames_per_buffer(), 0, std::string(), 0, 0);
+ params.frames_per_buffer(), 0, std::string(), 0, 0,
+ params.effects());
EXPECT_CALL(*source.get(), OnInitialize(_, capturer.get(), 0));
capturer->SetCapturerSource(source, params.channel_layout(),
- params.sample_rate());
+ params.sample_rate(), params.effects());
// Setup a audio track, connect it to the capturer and start it.
EXPECT_CALL(*source.get(), SetAutomaticGainControl(true));
diff --git a/content/renderer/renderer_webkitplatformsupport_impl.cc b/content/renderer/renderer_webkitplatformsupport_impl.cc
index 79cc75a..6013a05 100644
--- a/content/renderer/renderer_webkitplatformsupport_impl.cc
+++ b/content/renderer/renderer_webkitplatformsupport_impl.cc
@@ -726,7 +726,8 @@ RendererWebKitPlatformSupportImpl::createAudioDevice(
media::AudioParameters params(
media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
layout, input_channels,
- static_cast<int>(sample_rate), 16, buffer_size);
+ static_cast<int>(sample_rate), 16, buffer_size,
+ media::AudioParameters::NO_EFFECTS);
return new RendererWebAudioDeviceImpl(params, callback, session_id);
}