summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--  content/content_renderer.gypi                                       2
-rw-r--r--  content/content_tests.gypi                                          1
-rw-r--r--  content/renderer/media/media_stream_dependency_factory.cc          63
-rw-r--r--  content/renderer/media/media_stream_dependency_factory.h            7
-rw-r--r--  content/renderer/media/media_stream_impl.cc                         4
-rw-r--r--  content/renderer/media/media_stream_source_extra_data.h             6
-rw-r--r--  content/renderer/media/mock_media_stream_dependency_factory.cc      7
-rw-r--r--  content/renderer/media/mock_media_stream_dependency_factory.h       5
-rw-r--r--  content/renderer/media/rtc_peer_connection_handler.cc              13
-rw-r--r--  content/renderer/media/rtc_peer_connection_handler_unittest.cc      3
-rw-r--r--  content/renderer/media/webaudio_capturer_source.cc                 89
-rw-r--r--  content/renderer/media/webaudio_capturer_source.h                  60
-rw-r--r--  content/renderer/media/webrtc_audio_capturer.cc                   239
-rw-r--r--  content/renderer/media/webrtc_audio_capturer.h                     42
-rw-r--r--  content/renderer/media/webrtc_audio_device_unittest.cc              4
-rw-r--r--  content/renderer/media/webrtc_local_audio_source_provider.cc      155
-rw-r--r--  content/renderer/media/webrtc_local_audio_source_provider.h       109
-rw-r--r--  content/renderer/media/webrtc_local_audio_source_provider_unittest.cc 121
-rw-r--r--  content/renderer/media/webrtc_local_audio_track.cc                175
-rw-r--r--  content/renderer/media/webrtc_local_audio_track.h                  45
-rw-r--r--  content/renderer/media/webrtc_local_audio_track_unittest.cc        52
21 files changed, 886 insertions(+), 316 deletions(-)
diff --git a/content/content_renderer.gypi b/content/content_renderer.gypi
index 9442fdc..f74d371 100644
--- a/content/content_renderer.gypi
+++ b/content/content_renderer.gypi
@@ -635,6 +635,8 @@
'renderer/media/webrtc_identity_service.h',
'renderer/media/webrtc_local_audio_renderer.cc',
'renderer/media/webrtc_local_audio_renderer.h',
+ 'renderer/media/webrtc_local_audio_source_provider.cc',
+ 'renderer/media/webrtc_local_audio_source_provider.h',
'renderer/media/webrtc_local_audio_track.cc',
'renderer/media/webrtc_local_audio_track.h',
'renderer/media/webrtc_logging_initializer.cc',
diff --git a/content/content_tests.gypi b/content/content_tests.gypi
index 80971e4..ae647f1 100644
--- a/content/content_tests.gypi
+++ b/content/content_tests.gypi
@@ -670,6 +670,7 @@
'renderer/media/video_source_handler_unittest.cc',
'renderer/media/webrtc_audio_device_unittest.cc',
'renderer/media/webrtc_identity_service_unittest.cc',
+ 'renderer/media/webrtc_local_audio_source_provider_unittest.cc',
'renderer/media/webrtc_local_audio_track_unittest.cc',
],
'dependencies': [
diff --git a/content/renderer/media/media_stream_dependency_factory.cc b/content/renderer/media/media_stream_dependency_factory.cc
index 43eb75b..17b774c 100644
--- a/content/renderer/media/media_stream_dependency_factory.cc
+++ b/content/renderer/media/media_stream_dependency_factory.cc
@@ -381,8 +381,8 @@ void MediaStreamDependencyFactory::CreateNativeLocalMediaStream(
}
bool MediaStreamDependencyFactory::AddNativeMediaStreamTrack(
- const WebKit::WebMediaStream& stream,
- const WebKit::WebMediaStreamTrack& track) {
+ const WebKit::WebMediaStream& stream,
+ const WebKit::WebMediaStreamTrack& track) {
MediaStreamExtraData* extra_data =
static_cast<MediaStreamExtraData*>(stream.extraData());
webrtc::MediaStreamInterface* native_stream = extra_data->stream().get();
@@ -396,12 +396,12 @@ bool MediaStreamDependencyFactory::AddNativeMediaStreamTrack(
// right now they're on the source, so we fetch them from there.
RTCMediaConstraints track_constraints(source.constraints());
- scoped_refptr<WebRtcAudioCapturer> capturer;
+ scoped_refptr<WebAudioCapturerSource> webaudio_source;
if (!source_data) {
if (source.requiresAudioConsumer()) {
// We're adding a WebAudio MediaStream.
// Create a specific capturer for each WebAudio consumer.
- capturer = CreateWebAudioSource(&source, &track_constraints);
+ webaudio_source = CreateWebAudioSource(&source, &track_constraints);
source_data =
static_cast<MediaStreamSourceExtraData*>(source.extraData());
} else {
@@ -418,15 +418,21 @@ bool MediaStreamDependencyFactory::AddNativeMediaStreamTrack(
std::string track_id = UTF16ToUTF8(track.id());
if (source.type() == WebKit::WebMediaStreamSource::TypeAudio) {
- if (!capturer.get() && GetWebRtcAudioDevice())
+ scoped_refptr<WebRtcAudioCapturer> capturer;
+ if (GetWebRtcAudioDevice())
capturer = GetWebRtcAudioDevice()->GetDefaultCapturer();
scoped_refptr<webrtc::AudioTrackInterface> audio_track(
CreateLocalAudioTrack(track_id,
capturer,
+ webaudio_source.get(),
source_data->local_audio_source(),
&track_constraints));
audio_track->set_enabled(track.isEnabled());
+ if (capturer.get()) {
+ WebKit::WebMediaStreamTrack writable_track = track;
+ writable_track.setSourceProvider(capturer->audio_source_provider());
+ }
return native_stream->AddTrack(audio_track.get());
} else {
DCHECK(source.type() == WebKit::WebMediaStreamSource::TypeVideo);
@@ -484,9 +490,15 @@ bool MediaStreamDependencyFactory::RemoveNativeMediaStreamTrack(
type == WebKit::WebMediaStreamSource::TypeVideo);
std::string track_id = UTF16ToUTF8(track.id());
- return type == WebKit::WebMediaStreamSource::TypeAudio ?
- native_stream->RemoveTrack(native_stream->FindAudioTrack(track_id)) :
- native_stream->RemoveTrack(native_stream->FindVideoTrack(track_id));
+ if (type == WebKit::WebMediaStreamSource::TypeAudio) {
+ // Remove the source provider as the track is going away.
+ WebKit::WebMediaStreamTrack writable_track = track;
+ writable_track.setSourceProvider(NULL);
+ return native_stream->RemoveTrack(native_stream->FindAudioTrack(track_id));
+ }
+
+ CHECK_EQ(type, WebKit::WebMediaStreamSource::TypeVideo);
+ return native_stream->RemoveTrack(native_stream->FindVideoTrack(track_id));
}
bool MediaStreamDependencyFactory::CreatePeerConnectionFactory() {
@@ -605,26 +617,17 @@ MediaStreamDependencyFactory::CreateLocalVideoSource(
return source;
}
-scoped_refptr<WebRtcAudioCapturer>
+scoped_refptr<WebAudioCapturerSource>
MediaStreamDependencyFactory::CreateWebAudioSource(
WebKit::WebMediaStreamSource* source,
RTCMediaConstraints* constraints) {
DVLOG(1) << "MediaStreamDependencyFactory::CreateWebAudioSource()";
DCHECK(GetWebRtcAudioDevice());
- // Set up the source and ensure that WebAudio is driving things instead of
- // a microphone. For WebAudio, we always create a new capturer without
- // calling initialize(), WebAudio will re-configure the capturer later on.
- // Pass -1 as the |render_view_id| and an empty device struct to tell the
- // capturer not to start the default source.
- scoped_refptr<WebRtcAudioCapturer> capturer(
- MaybeCreateAudioCapturer(-1, StreamDeviceInfo()));
- DCHECK(capturer.get());
-
scoped_refptr<WebAudioCapturerSource>
- webaudio_capturer_source(new WebAudioCapturerSource(capturer.get()));
+ webaudio_capturer_source(new WebAudioCapturerSource());
MediaStreamSourceExtraData* source_data =
- new content::MediaStreamSourceExtraData(webaudio_capturer_source.get());
+ new content::MediaStreamSourceExtraData();
// Create a LocalAudioSource object which holds audio options.
// Use audio constraints where all values are true, i.e., enable
@@ -638,7 +641,7 @@ MediaStreamDependencyFactory::CreateWebAudioSource(
// Replace the default source with WebAudio as source instead.
source->addAudioConsumer(webaudio_capturer_source.get());
- return capturer;
+ return webaudio_capturer_source;
}
scoped_refptr<webrtc::VideoTrackInterface>
@@ -668,13 +671,16 @@ scoped_refptr<webrtc::AudioTrackInterface>
MediaStreamDependencyFactory::CreateLocalAudioTrack(
const std::string& id,
const scoped_refptr<WebRtcAudioCapturer>& capturer,
+ WebAudioCapturerSource* webaudio_source,
webrtc::AudioSourceInterface* source,
const webrtc::MediaConstraintsInterface* constraints) {
// TODO(xians): Merge |source| to the capturer(). We can't do this today
// because only one capturer() is supported while one |source| is created
// for each audio track.
scoped_refptr<WebRtcLocalAudioTrack> audio_track(
- WebRtcLocalAudioTrack::Create(id, capturer, source, constraints));
+ WebRtcLocalAudioTrack::Create(id, capturer, webaudio_source,
+ source, constraints));
+
// Add the WebRtcAudioDevice as the sink to the local audio track.
audio_track->AddSink(GetWebRtcAudioDevice());
// Start the audio track. This will hook the |audio_track| to the capturer
@@ -811,11 +817,13 @@ scoped_refptr<WebRtcAudioCapturer>
MediaStreamDependencyFactory::MaybeCreateAudioCapturer(
int render_view_id,
const StreamDeviceInfo& device_info) {
- scoped_refptr<WebRtcAudioCapturer> capturer;
- if (render_view_id != -1) {
- // From a normal getUserMedia, re-use the existing default capturer.
- capturer = GetWebRtcAudioDevice()->GetDefaultCapturer();
- }
+ // TODO(xians): Handle the cases when gUM is called without a proper render
+ // view, for example, by an extension.
+ DCHECK_GE(render_view_id, 0);
+
+ scoped_refptr<WebRtcAudioCapturer> capturer =
+ GetWebRtcAudioDevice()->GetDefaultCapturer();
+
// If the default capturer does not exist or |render_view_id| == -1, create
// a new capturer.
bool is_new_capturer = false;
@@ -829,6 +837,7 @@ MediaStreamDependencyFactory::MaybeCreateAudioCapturer(
static_cast<media::ChannelLayout>(
device_info.device.input.channel_layout),
device_info.device.input.sample_rate,
+ device_info.device.input.frames_per_buffer,
device_info.session_id,
device_info.device.id)) {
return NULL;
diff --git a/content/renderer/media/media_stream_dependency_factory.h b/content/renderer/media/media_stream_dependency_factory.h
index c89f274..a728d885 100644
--- a/content/renderer/media/media_stream_dependency_factory.h
+++ b/content/renderer/media/media_stream_dependency_factory.h
@@ -44,6 +44,7 @@ class IpcNetworkManager;
class IpcPacketSocketFactory;
class RTCMediaConstraints;
class VideoCaptureImplManager;
+class WebAudioCapturerSource;
class WebRtcAudioCapturer;
class WebRtcAudioDeviceImpl;
class WebRtcLoggingHandlerImpl;
@@ -164,15 +165,15 @@ class CONTENT_EXPORT MediaStreamDependencyFactory
// WebRtcAudioCapturer.
// The |constraints| will be modified to include the default, mandatory
// WebAudio constraints.
- virtual scoped_refptr<WebRtcAudioCapturer> CreateWebAudioSource(
- WebKit::WebMediaStreamSource* source,
- RTCMediaConstraints* constraints);
+ virtual scoped_refptr<WebAudioCapturerSource> CreateWebAudioSource(
+ WebKit::WebMediaStreamSource* source, RTCMediaConstraints* constraints);
// Asks the PeerConnection factory to create a Local AudioTrack object.
virtual scoped_refptr<webrtc::AudioTrackInterface>
CreateLocalAudioTrack(
const std::string& id,
const scoped_refptr<WebRtcAudioCapturer>& capturer,
+ WebAudioCapturerSource* webaudio_source,
webrtc::AudioSourceInterface* source,
const webrtc::MediaConstraintsInterface* constraints);
diff --git a/content/renderer/media/media_stream_impl.cc b/content/renderer/media/media_stream_impl.cc
index d5dedc6..96dffde 100644
--- a/content/renderer/media/media_stream_impl.cc
+++ b/content/renderer/media/media_stream_impl.cc
@@ -727,9 +727,7 @@ MediaStreamSourceExtraData::MediaStreamSourceExtraData(
webkit_source_(webkit_source) {
}
-MediaStreamSourceExtraData::MediaStreamSourceExtraData(
- media::AudioCapturerSource* source)
- : audio_source_(source) {
+MediaStreamSourceExtraData::MediaStreamSourceExtraData() {
}
MediaStreamSourceExtraData::~MediaStreamSourceExtraData() {}
diff --git a/content/renderer/media/media_stream_source_extra_data.h b/content/renderer/media/media_stream_source_extra_data.h
index c997832..bb06435 100644
--- a/content/renderer/media/media_stream_source_extra_data.h
+++ b/content/renderer/media/media_stream_source_extra_data.h
@@ -9,7 +9,6 @@
#include "content/common/content_export.h"
#include "content/common/media/media_stream_options.h"
#include "content/renderer/media/media_stream_source_observer.h"
-#include "media/base/audio_capturer_source.h"
#include "third_party/libjingle/source/talk/app/webrtc/videosourceinterface.h"
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
@@ -21,8 +20,7 @@ class CONTENT_EXPORT MediaStreamSourceExtraData
MediaStreamSourceExtraData(
const StreamDeviceInfo& device_info,
const WebKit::WebMediaStreamSource& webkit_source);
- explicit MediaStreamSourceExtraData(
- media::AudioCapturerSource* source);
+ MediaStreamSourceExtraData();
virtual ~MediaStreamSourceExtraData();
// Returns the WebMediaStreamSource object that owns this object.
@@ -47,7 +45,6 @@ class CONTENT_EXPORT MediaStreamSourceExtraData
}
webrtc::VideoSourceInterface* video_source() { return video_source_.get(); }
- media::AudioCapturerSource* audio_source() { return audio_source_.get(); }
webrtc::AudioSourceInterface* local_audio_source() {
return local_audio_source_.get();
}
@@ -65,7 +62,6 @@ class CONTENT_EXPORT MediaStreamSourceExtraData
// MediaStreamImpl::~UserMediaRequestInfo() does.
WebKit::WebMediaStreamSource webkit_source_;
scoped_refptr<webrtc::VideoSourceInterface> video_source_;
- scoped_refptr<media::AudioCapturerSource> audio_source_;
// This member holds an instance of webrtc::LocalAudioSource. This is used
// as a container for audio options.
diff --git a/content/renderer/media/mock_media_stream_dependency_factory.cc b/content/renderer/media/mock_media_stream_dependency_factory.cc
index 5b58c5e..26c3275 100644
--- a/content/renderer/media/mock_media_stream_dependency_factory.cc
+++ b/content/renderer/media/mock_media_stream_dependency_factory.cc
@@ -7,6 +7,7 @@
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
#include "content/renderer/media/mock_peer_connection_impl.h"
+#include "content/renderer/media/webaudio_capturer_source.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
@@ -406,7 +407,7 @@ MockMediaStreamDependencyFactory::CreateLocalVideoSource(
return last_video_source_;
}
-scoped_refptr<WebRtcAudioCapturer>
+scoped_refptr<WebAudioCapturerSource>
MockMediaStreamDependencyFactory::CreateWebAudioSource(
WebKit::WebMediaStreamSource* source,
RTCMediaConstraints* constraints) {
@@ -448,12 +449,14 @@ scoped_refptr<webrtc::AudioTrackInterface>
MockMediaStreamDependencyFactory::CreateLocalAudioTrack(
const std::string& id,
const scoped_refptr<WebRtcAudioCapturer>& capturer,
+ WebAudioCapturerSource* webaudio_source,
webrtc::AudioSourceInterface* source,
const webrtc::MediaConstraintsInterface* constraints) {
DCHECK(mock_pc_factory_created_);
DCHECK(!capturer.get());
return WebRtcLocalAudioTrack::Create(
- id, WebRtcAudioCapturer::CreateCapturer(), source, constraints);
+ id, WebRtcAudioCapturer::CreateCapturer(), webaudio_source,
+ source, constraints);
}
SessionDescriptionInterface*
diff --git a/content/renderer/media/mock_media_stream_dependency_factory.h b/content/renderer/media/mock_media_stream_dependency_factory.h
index aa73f9a..446b0ad 100644
--- a/content/renderer/media/mock_media_stream_dependency_factory.h
+++ b/content/renderer/media/mock_media_stream_dependency_factory.h
@@ -14,6 +14,8 @@
namespace content {
+class WebAudioCapturerSource;
+
class MockVideoSource : public webrtc::VideoSourceInterface {
public:
MockVideoSource();
@@ -126,7 +128,7 @@ class MockMediaStreamDependencyFactory : public MediaStreamDependencyFactory {
int video_session_id,
bool is_screencast,
const webrtc::MediaConstraintsInterface* constraints) OVERRIDE;
- virtual scoped_refptr<WebRtcAudioCapturer> CreateWebAudioSource(
+ virtual scoped_refptr<WebAudioCapturerSource> CreateWebAudioSource(
WebKit::WebMediaStreamSource* source,
RTCMediaConstraints* constraints) OVERRIDE;
virtual scoped_refptr<webrtc::MediaStreamInterface>
@@ -140,6 +142,7 @@ class MockMediaStreamDependencyFactory : public MediaStreamDependencyFactory {
virtual scoped_refptr<webrtc::AudioTrackInterface> CreateLocalAudioTrack(
const std::string& id,
const scoped_refptr<WebRtcAudioCapturer>& capturer,
+ WebAudioCapturerSource* webaudio_source,
webrtc::AudioSourceInterface* source,
const webrtc::MediaConstraintsInterface* constraints) OVERRIDE;
virtual webrtc::SessionDescriptionInterface* CreateSessionDescription(
diff --git a/content/renderer/media/rtc_peer_connection_handler.cc b/content/renderer/media/rtc_peer_connection_handler.cc
index b361469..127a7e2 100644
--- a/content/renderer/media/rtc_peer_connection_handler.cc
+++ b/content/renderer/media/rtc_peer_connection_handler.cc
@@ -20,6 +20,8 @@
#include "content/renderer/media/rtc_data_channel_handler.h"
#include "content/renderer/media/rtc_dtmf_sender_handler.h"
#include "content/renderer/media/rtc_media_constraints.h"
+#include "content/renderer/media/webrtc_audio_capturer.h"
+#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/render_thread_impl.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
// TODO(hta): Move the following include to WebRTCStatsRequest.h file.
@@ -528,6 +530,17 @@ bool RTCPeerConnectionHandler::addStream(
if (peer_connection_tracker_)
peer_connection_tracker_->TrackAddStream(
this, stream, PeerConnectionTracker::SOURCE_LOCAL);
+
+ // A media stream is being connected to a peer connection, so enable the
+ // peer-connection mode for the capturer.
+ WebRtcAudioDeviceImpl* audio_device =
+ dependency_factory_->GetWebRtcAudioDevice();
+ if (audio_device) {
+ WebRtcAudioCapturer* capturer = audio_device->GetDefaultCapturer();
+ if (capturer)
+ capturer->EnablePeerConnectionMode();
+ }
+
return AddStream(stream, &constraints);
}
diff --git a/content/renderer/media/rtc_peer_connection_handler_unittest.cc b/content/renderer/media/rtc_peer_connection_handler_unittest.cc
index 0b7a75b..9528cbe 100644
--- a/content/renderer/media/rtc_peer_connection_handler_unittest.cc
+++ b/content/renderer/media/rtc_peer_connection_handler_unittest.cc
@@ -253,7 +253,7 @@ class RTCPeerConnectionHandlerTest : public ::testing::Test {
RTCMediaConstraints audio_constraints(audio_source.constraints());
scoped_refptr<webrtc::AudioTrackInterface> audio_track(
mock_dependency_factory_->CreateLocalAudioTrack(
- audio_track_id, capturer, NULL,
+ audio_track_id, capturer, NULL, NULL,
&audio_constraints));
native_stream->AddTrack(audio_track.get());
@@ -291,6 +291,7 @@ class RTCPeerConnectionHandlerTest : public ::testing::Test {
mock_dependency_factory_->CreateLocalAudioTrack(audio_track_label,
capturer,
NULL,
+ NULL,
NULL));
stream->AddTrack(audio_track.get());
}
diff --git a/content/renderer/media/webaudio_capturer_source.cc b/content/renderer/media/webaudio_capturer_source.cc
index 35cd99c..263ba47 100644
--- a/content/renderer/media/webaudio_capturer_source.cc
+++ b/content/renderer/media/webaudio_capturer_source.cc
@@ -5,7 +5,8 @@
#include "content/renderer/media/webaudio_capturer_source.h"
#include "base/logging.h"
-#include "content/renderer/media/webrtc_audio_capturer.h"
+#include "content/renderer/media/webrtc_local_audio_source_provider.h"
+#include "content/renderer/media/webrtc_local_audio_track.h"
using media::AudioBus;
using media::AudioFifo;
@@ -14,15 +15,13 @@ using media::ChannelLayout;
using media::CHANNEL_LAYOUT_MONO;
using media::CHANNEL_LAYOUT_STEREO;
-static const int kFifoSize = 2048;
+static const int kMaxNumberOfBuffersInFifo = 5;
namespace content {
-WebAudioCapturerSource::WebAudioCapturerSource(WebRtcAudioCapturer* capturer)
- : capturer_(capturer),
- set_format_channels_(0),
- callback_(0),
- started_(false) {
+WebAudioCapturerSource::WebAudioCapturerSource()
+ : track_(NULL),
+ source_provider_(NULL) {
}
WebAudioCapturerSource::~WebAudioCapturerSource() {
@@ -30,57 +29,72 @@ WebAudioCapturerSource::~WebAudioCapturerSource() {
void WebAudioCapturerSource::setFormat(
size_t number_of_channels, float sample_rate) {
+ DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "WebAudioCapturerSource::setFormat(sample_rate="
<< sample_rate << ")";
- if (number_of_channels <= 2) {
- set_format_channels_ = number_of_channels;
- ChannelLayout channel_layout =
- number_of_channels == 1 ? CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
- capturer_->SetCapturerSource(this, channel_layout, sample_rate);
- } else {
- // TODO(crogers): Handle more than just the mono and stereo cases.
+ if (number_of_channels > 2) {
+ // TODO(xians): Handle more than just the mono and stereo cases.
LOG(WARNING) << "WebAudioCapturerSource::setFormat() : unhandled format.";
+ return;
}
+
+ ChannelLayout channel_layout =
+ number_of_channels == 1 ? CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+
+ base::AutoLock auto_lock(lock_);
+ // Set the format used by this WebAudioCapturerSource. We are using 10ms data
+ // as the buffer size since that is the native packet size that
+ // WebRtc runs on.
+ params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ channel_layout, number_of_channels, 0, sample_rate, 16,
+ sample_rate / 100);
+
+ // Update the downstream client to use the same format as what WebKit
+ // is using.
+ if (track_)
+ track_->SetCaptureFormat(params_);
+
+ wrapper_bus_ = AudioBus::CreateWrapper(params_.channels());
+ capture_bus_ = AudioBus::Create(params_);
+ fifo_.reset(new AudioFifo(
+ params_.channels(),
+ kMaxNumberOfBuffersInFifo * params_.frames_per_buffer()));
}
-void WebAudioCapturerSource::Initialize(
- const media::AudioParameters& params,
- media::AudioCapturerSource::CaptureCallback* callback,
- int session_id) {
+void WebAudioCapturerSource::Start(
+ WebRtcLocalAudioTrack* track,
+ WebRtcLocalAudioSourceProvider* source_provider) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(track);
+ DCHECK(source_provider);
// The downstream client should be configured the same as what WebKit
// is feeding it.
- DCHECK_EQ(set_format_channels_, params.channels());
+ track->SetCaptureFormat(params_);
base::AutoLock auto_lock(lock_);
- params_ = params;
- callback_ = callback;
- wrapper_bus_ = AudioBus::CreateWrapper(params.channels());
- capture_bus_ = AudioBus::Create(params);
- fifo_.reset(new AudioFifo(params.channels(), kFifoSize));
-}
-
-void WebAudioCapturerSource::Start() {
- started_ = true;
+ track_ = track;
+ source_provider_ = source_provider;
}
void WebAudioCapturerSource::Stop() {
- started_ = false;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::AutoLock auto_lock(lock_);
+ track_ = NULL;
+ source_provider_ = NULL;
}
void WebAudioCapturerSource::consumeAudio(
const WebKit::WebVector<const float*>& audio_data,
size_t number_of_frames) {
base::AutoLock auto_lock(lock_);
-
- if (!callback_)
+ if (!track_)
return;
wrapper_bus_->set_frames(number_of_frames);
// Make sure WebKit is honoring what it told us up front
// about the channels.
- DCHECK_EQ(set_format_channels_, static_cast<int>(audio_data.size()));
- DCHECK_EQ(set_format_channels_, wrapper_bus_->channels());
+ DCHECK_EQ(params_.channels(), static_cast<int>(audio_data.size()));
for (size_t i = 0; i < audio_data.size(); ++i)
wrapper_bus_->SetChannelData(i, const_cast<float*>(audio_data[i]));
@@ -88,15 +102,20 @@ void WebAudioCapturerSource::consumeAudio(
// Handle mismatch between WebAudio buffer-size and WebRTC.
int available = fifo_->max_frames() - fifo_->frames();
if (available < static_cast<int>(number_of_frames)) {
- LOG(ERROR) << "WebAudioCapturerSource::Consume() : FIFO overrun.";
+ NOTREACHED() << "WebAudioCapturerSource::Consume() : FIFO overrun.";
return;
}
fifo_->Push(wrapper_bus_.get());
int capture_frames = params_.frames_per_buffer();
+ int delay_ms = 0;
+ int volume = 0;
+ bool key_pressed = false;
while (fifo_->frames() >= capture_frames) {
+ source_provider_->GetAudioProcessingParams(
+ &delay_ms, &volume, &key_pressed);
fifo_->Consume(capture_bus_.get(), 0, capture_frames);
- callback_->Capture(capture_bus_.get(), 0, 1.0, false);
+ track_->Capture(capture_bus_.get(), delay_ms, volume, key_pressed);
}
}
diff --git a/content/renderer/media/webaudio_capturer_source.h b/content/renderer/media/webaudio_capturer_source.h
index 68bd05f..b438a40 100644
--- a/content/renderer/media/webaudio_capturer_source.h
+++ b/content/renderer/media/webaudio_capturer_source.h
@@ -7,6 +7,7 @@
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_capturer_source.h"
#include "media/base/audio_fifo.h"
@@ -15,50 +16,59 @@
namespace content {
-class WebRtcAudioCapturer;
+class WebRtcLocalAudioTrack;
+class WebRtcLocalAudioSourceProvider;
// WebAudioCapturerSource is the missing link between
-// WebAudio's MediaStreamAudioDestinationNode and WebRtcAudioCapturer.
+// WebAudio's MediaStreamAudioDestinationNode and WebRtcLocalAudioTrack.
//
// 1. WebKit calls the setFormat() method setting up the basic stream format
-// (channels, and sample-rate). At this time, it dispatches this information
-// to the WebRtcAudioCapturer by calling its SetCapturerSource() method.
-// 2. Initialize() is called, where we should get back the same
-// stream format information as (1). We also get the CaptureCallback here.
-// 3. consumeAudio() is called periodically by WebKit which dispatches the
-// audio stream to the CaptureCallback::Capture() method.
+// (channels, and sample-rate).
+// 2. consumeAudio() is called periodically by WebKit which dispatches the
+// audio stream to the WebRtcLocalAudioTrack::Capture() method.
class WebAudioCapturerSource
- : public media::AudioCapturerSource,
- public WebKit::WebAudioDestinationConsumer {
+ : public base::RefCountedThreadSafe<WebAudioCapturerSource>,
+ public WebKit::WebAudioDestinationConsumer {
public:
- explicit WebAudioCapturerSource(WebRtcAudioCapturer* capturer);
+ WebAudioCapturerSource();
// WebAudioDestinationConsumer implementation.
- // setFormat() is called early on, so that we can configure the capturer.
+ // setFormat() is called early on, so that we can configure the audio track.
virtual void setFormat(size_t number_of_channels, float sample_rate) OVERRIDE;
// MediaStreamAudioDestinationNode periodically calls consumeAudio().
+ // Called on the WebAudio audio thread.
virtual void consumeAudio(const WebKit::WebVector<const float*>& audio_data,
size_t number_of_frames) OVERRIDE;
- // AudioCapturerSource implementation.
- virtual void Initialize(
- const media::AudioParameters& params,
- media::AudioCapturerSource::CaptureCallback* callback,
- int session_id) OVERRIDE;
+ // Called when the WebAudioCapturerSource is hooking to a media audio track.
+ // |track| is the sink of the data flow. |source_provider| is the source of
+ // the data flow, where stream information such as delay, volume, and
+ // key_pressed is stored.
+ void Start(WebRtcLocalAudioTrack* track,
+ WebRtcLocalAudioSourceProvider* source_provider);
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE { }
- virtual void SetAutomaticGainControl(bool enable) OVERRIDE { }
+ // Called when the media audio track is stopping.
+ void Stop();
- private:
+ protected:
+ friend class base::RefCountedThreadSafe<WebAudioCapturerSource>;
virtual ~WebAudioCapturerSource();
- WebRtcAudioCapturer* capturer_;
+ private:
+ // Used to DCHECK that some methods are called on the correct thread.
+ base::ThreadChecker thread_checker_;
+
+ // The audio track this WebAudioCapturerSource is feeding data to.
+ // WebRtcLocalAudioTrack is reference counted and owns this object.
+ // To avoid circular reference, a raw pointer is kept here.
+ WebRtcLocalAudioTrack* track_;
+
+ // A raw pointer to the source provider, used to get audio-processing
+ // parameters such as delay, volume, and key_pressed information.
+ // This |source_provider_| is guaranteed to outlive this object.
+ WebRtcLocalAudioSourceProvider* source_provider_;
- int set_format_channels_;
media::AudioParameters params_;
- media::AudioCapturerSource::CaptureCallback* callback_;
// Wraps data coming from HandleCapture().
scoped_ptr<media::AudioBus> wrapper_bus_;
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc
index 26b0fd7..c5127c1 100644
--- a/content/renderer/media/webrtc_audio_capturer.cc
+++ b/content/renderer/media/webrtc_audio_capturer.cc
@@ -34,63 +34,8 @@ const int kValidInputRates[] = {48000, 44100};
const int kValidInputRates[] = {44100};
#endif
-int GetBufferSizeForSampleRate(int sample_rate) {
- int buffer_size = 0;
-#if defined(OS_WIN) || defined(OS_MACOSX)
- // Use a buffer size of 10ms.
- buffer_size = (sample_rate / 100);
-#elif defined(OS_LINUX) || defined(OS_OPENBSD)
- // Based on tests using the current ALSA implementation in Chrome, we have
- // found that the best combination is 20ms on the input side and 10ms on the
- // output side.
- buffer_size = 2 * sample_rate / 100;
-#elif defined(OS_ANDROID)
- // TODO(leozwang): Tune and adjust buffer size on Android.
- buffer_size = 2 * sample_rate / 100;
-#endif
- return buffer_size;
-}
-
} // namespace
-// This is a temporary audio buffer with parameters used to send data to
-// callbacks.
-class WebRtcAudioCapturer::ConfiguredBuffer :
- public base::RefCounted<WebRtcAudioCapturer::ConfiguredBuffer> {
- public:
- ConfiguredBuffer() {}
-
- bool Initialize(int sample_rate,
- media::ChannelLayout channel_layout) {
- int buffer_size = GetBufferSizeForSampleRate(sample_rate);
- DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size;
-
- media::AudioParameters::Format format =
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
-
- // bits_per_sample is always 16 for now.
- int bits_per_sample = 16;
- int channels = ChannelLayoutToChannelCount(channel_layout);
- params_.Reset(format, channel_layout, channels, 0,
- sample_rate, bits_per_sample, buffer_size);
- buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]);
-
- return true;
- }
-
- int16* buffer() const { return buffer_.get(); }
- const media::AudioParameters& params() const { return params_; }
-
- private:
- ~ConfiguredBuffer() {}
- friend class base::RefCounted<WebRtcAudioCapturer::ConfiguredBuffer>;
-
- scoped_ptr<int16[]> buffer_;
-
- // Cached values of utilized audio parameters.
- media::AudioParameters params_;
-};
-
// Reference counted container of WebRtcLocalAudioTrack delegate.
class WebRtcAudioCapturer::TrackOwner
: public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> {
@@ -98,20 +43,16 @@ class WebRtcAudioCapturer::TrackOwner
explicit TrackOwner(WebRtcLocalAudioTrack* track)
: delegate_(track) {}
- void CaptureData(const int16* audio_data,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int volume,
- bool key_pressed) {
+ void Capture(media::AudioBus* audio_source,
+ int audio_delay_milliseconds,
+ double volume,
+ bool key_pressed) {
base::AutoLock lock(lock_);
if (delegate_) {
- delegate_->CaptureData(audio_data,
- number_of_channels,
- number_of_frames,
- audio_delay_milliseconds,
- volume,
- key_pressed);
+ delegate_->Capture(audio_source,
+ audio_delay_milliseconds,
+ volume,
+ key_pressed);
}
}
@@ -161,47 +102,55 @@ scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() {
return capturer;
}
-bool WebRtcAudioCapturer::Reconfigure(int sample_rate,
+void WebRtcAudioCapturer::Reconfigure(int sample_rate,
media::ChannelLayout channel_layout) {
- scoped_refptr<ConfiguredBuffer> new_buffer(new ConfiguredBuffer());
- if (!new_buffer->Initialize(sample_rate, channel_layout))
- return false;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ int buffer_size = GetBufferSize(sample_rate);
+ DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size;
+
+ media::AudioParameters::Format format =
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
+
+ // bits_per_sample is always 16 for now.
+ int bits_per_sample = 16;
+ media::AudioParameters params(format, channel_layout, sample_rate,
+ bits_per_sample, buffer_size);
TrackList tracks;
{
base::AutoLock auto_lock(lock_);
-
- buffer_ = new_buffer;
tracks = tracks_;
+ params_ = params;
}
// Tell all audio_tracks which format we use.
for (TrackList::const_iterator it = tracks.begin();
it != tracks.end(); ++it)
- (*it)->SetCaptureFormat(new_buffer->params());
-
- return true;
+ (*it)->SetCaptureFormat(params);
}
bool WebRtcAudioCapturer::Initialize(int render_view_id,
media::ChannelLayout channel_layout,
int sample_rate,
+ int buffer_size,
int session_id,
const std::string& device_id) {
DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_GE(render_view_id, 0);
DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
channel_layout, media::CHANNEL_LAYOUT_MAX);
+ render_view_id_ = render_view_id;
session_id_ = session_id;
device_id_ = device_id;
+ hardware_buffer_size_ = buffer_size;
+
if (render_view_id == -1) {
- // This capturer is used by WebAudio, return true without creating a
- // default capturing source. WebAudio will inject its own source via
- // SetCapturerSource() at a later state.
- DCHECK(device_id.empty());
+ // Return true here to allow injecting a new source via SetCapturerSource()
+ // at a later state.
return true;
}
@@ -232,8 +181,7 @@ bool WebRtcAudioCapturer::Initialize(int render_view_id,
return false;
}
- if (!Reconfigure(sample_rate, channel_layout))
- return false;
+ Reconfigure(sample_rate, channel_layout);
// Create and configure the default audio capturing source. The |source_|
// will be overwritten if an external client later calls SetCapturerSource()
@@ -249,8 +197,13 @@ WebRtcAudioCapturer::WebRtcAudioCapturer()
: source_(NULL),
running_(false),
agc_is_enabled_(false),
+ render_view_id_(-1),
+ hardware_buffer_size_(0),
session_id_(0),
- volume_(0) {
+ volume_(0),
+ source_provider_(new WebRtcLocalAudioSourceProvider()),
+ peer_connection_mode_(false) {
+ DCHECK(source_provider_.get());
DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
}
@@ -274,10 +227,7 @@ void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) {
DCHECK(std::find_if(tracks_.begin(), tracks_.end(),
TrackOwner::TrackWrapper(track)) == tracks_.end());
- if (buffer_.get()) {
- track->SetCaptureFormat(buffer_->params());
- }
-
+ track->SetCaptureFormat(params_);
tracks_.push_back(new WebRtcAudioCapturer::TrackOwner(track));
}
@@ -315,7 +265,6 @@ void WebRtcAudioCapturer::SetCapturerSource(
DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
<< "sample_rate=" << sample_rate << ")";
scoped_refptr<media::AudioCapturerSource> old_source;
- scoped_refptr<ConfiguredBuffer> current_buffer;
bool restart_source = false;
{
base::AutoLock auto_lock(lock_);
@@ -324,45 +273,58 @@ void WebRtcAudioCapturer::SetCapturerSource(
source_.swap(old_source);
source_ = source;
- current_buffer = buffer_;
// Reset the flag to allow starting the new source.
restart_source = running_;
running_ = false;
}
- const bool no_default_audio_source_exists = !current_buffer.get();
-
- // Detach the old source from normal recording or perform first-time
- // initialization if Initialize() has never been called. For the second
- // case, the caller is not "taking over an ongoing session" but instead
- // "taking control over a new session".
- if (old_source.get() || no_default_audio_source_exists) {
- DVLOG(1) << "New capture source will now be utilized.";
- if (old_source.get())
- old_source->Stop();
-
- // Dispatch the new parameters both to the sink(s) and to the new source.
- // The idea is to get rid of any dependency of the microphone parameters
- // which would normally be used by default.
- if (!Reconfigure(sample_rate, channel_layout)) {
- return;
- } else {
- // The buffer has been reconfigured. Update |current_buffer|.
- base::AutoLock auto_lock(lock_);
- current_buffer = buffer_;
- }
- }
+ DVLOG(1) << "Switching to a new capture source.";
+ if (old_source.get())
+ old_source->Stop();
- if (source.get()) {
- // Make sure to grab the new parameters in case they were reconfigured.
- source->Initialize(current_buffer->params(), this, session_id_);
- }
+ // Dispatch the new parameters both to the sink(s) and to the new source.
+ // The idea is to get rid of any dependency of the microphone parameters
+ // which would normally be used by default.
+ Reconfigure(sample_rate, channel_layout);
+
+ // Make sure to grab the new parameters in case they were reconfigured.
+ media::AudioParameters params = audio_parameters();
+ source_provider_->Initialize(params);
+ if (source.get())
+ source->Initialize(params, this, session_id_);
if (restart_source)
Start();
}
+void WebRtcAudioCapturer::EnablePeerConnectionMode() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(1) << "EnablePeerConnectionMode";
+ // Do nothing if the peer connection mode has been enabled.
+ if (peer_connection_mode_)
+ return;
+
+ peer_connection_mode_ = true;
+ int render_view_id = -1;
+ {
+ base::AutoLock auto_lock(lock_);
+ // Simply return if there is no existing source or the |render_view_id_| is
+ // not valid.
+ if (!source_.get() || render_view_id_== -1)
+ return;
+
+ render_view_id = render_view_id_;
+ }
+
+ // Create a new audio stream as source which will open the hardware using
+ // WebRtc native buffer size.
+ media::AudioParameters params = audio_parameters();
+ SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
+ params.channel_layout(),
+ static_cast<float>(params.sample_rate()));
+}
+
void WebRtcAudioCapturer::Start() {
DVLOG(1) << "WebRtcAudioCapturer::Start()";
base::AutoLock auto_lock(lock_);
@@ -443,7 +405,7 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
#endif
TrackList tracks;
- scoped_refptr<ConfiguredBuffer> buffer_ref_while_calling;
+ int current_volume = 0;
{
base::AutoLock auto_lock(lock_);
if (!running_)
@@ -453,32 +415,21 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
// webrtc::VoiceEngine. webrtc::VoiceEngine will handle the case when the
// volume is higher than 255.
volume_ = static_cast<int>((volume * MaxVolume()) + 0.5);
-
- // Copy the stuff we will need to local variables. In particular, we grab
- // a reference to the buffer so we can ensure it stays alive even if the
- // buffer is reconfigured while we are calling back.
- buffer_ref_while_calling = buffer_;
+ current_volume = volume_;
tracks = tracks_;
}
- int bytes_per_sample =
- buffer_ref_while_calling->params().bits_per_sample() / 8;
-
- // Interleave, scale, and clip input to int and store result in
- // a local byte buffer.
- audio_source->ToInterleaved(audio_source->frames(), bytes_per_sample,
- buffer_ref_while_calling->buffer());
+ // Deliver captured data to source provider, which stores the data into FIFO
+ // for WebAudio to fetch.
+ source_provider_->DeliverData(audio_source, audio_delay_milliseconds,
+ current_volume, key_pressed);
// Feed the data to the tracks.
for (TrackList::const_iterator it = tracks.begin();
it != tracks.end();
++it) {
- (*it)->CaptureData(buffer_ref_while_calling->buffer(),
- audio_source->channels(),
- audio_source->frames(),
- audio_delay_milliseconds,
- volume,
- key_pressed);
+ (*it)->Capture(audio_source, audio_delay_milliseconds,
+ current_volume, key_pressed);
}
}
@@ -488,9 +439,23 @@ void WebRtcAudioCapturer::OnCaptureError() {
media::AudioParameters WebRtcAudioCapturer::audio_parameters() const {
base::AutoLock auto_lock(lock_);
- // |buffer_| can be NULL when SetCapturerSource() or Initialize() has not
- // been called.
- return buffer_.get() ? buffer_->params() : media::AudioParameters();
+ return params_;
+}
+
+int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+#if defined(OS_ANDROID)
+ // TODO(henrika): Tune and adjust buffer size on Android.
+ return (2 * sample_rate / 100);
+#endif
+
+ // Use the native hardware buffer size in non peer connection mode.
+ if (!peer_connection_mode_ && hardware_buffer_size_)
+ return hardware_buffer_size_;
+
+ // WebRtc is running at a buffer size of 10ms data. Use a multiple of 10ms
+ // as the buffer size to achieve the best performance for WebRtc.
+ return (sample_rate / 100);
}
} // namespace content
diff --git a/content/renderer/media/webrtc_audio_capturer.h b/content/renderer/media/webrtc_audio_capturer.h
index caa88d03..2124679 100644
--- a/content/renderer/media/webrtc_audio_capturer.h
+++ b/content/renderer/media/webrtc_audio_capturer.h
@@ -13,6 +13,7 @@
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
+#include "content/renderer/media/webrtc_local_audio_source_provider.h"
#include "media/audio/audio_input_device.h"
#include "media/base/audio_capturer_source.h"
@@ -50,6 +51,7 @@ class CONTENT_EXPORT WebRtcAudioCapturer
bool Initialize(int render_view_id,
media::ChannelLayout channel_layout,
int sample_rate,
+ int buffer_size,
int session_id,
const std::string& device_id);
@@ -73,6 +75,11 @@ class CONTENT_EXPORT WebRtcAudioCapturer
media::ChannelLayout channel_layout,
float sample_rate);
+ // Called when a stream is connecting to a peer connection. This will set
+ // up the native buffer size for the stream in order to optimize the
+ // performance for peer connection.
+ void EnablePeerConnectionMode();
+
// Volume APIs used by WebRtcAudioDeviceImpl.
// Called on the AudioInputDevice audio thread.
void SetVolume(int volume);
@@ -95,6 +102,10 @@ class CONTENT_EXPORT WebRtcAudioCapturer
const std::string& device_id() const { return device_id_; }
+ WebKit::WebAudioSourceProvider* audio_source_provider() const {
+ return source_provider_.get();
+ }
+
protected:
friend class base::RefCountedThreadSafe<WebRtcAudioCapturer>;
WebRtcAudioCapturer();
@@ -112,9 +123,9 @@ class CONTENT_EXPORT WebRtcAudioCapturer
bool key_pressed) OVERRIDE;
virtual void OnCaptureError() OVERRIDE;
- // Reconfigures the capturer with a new buffer size and capture parameters.
- // Must be called without holding the lock. Returns true on success.
- bool Reconfigure(int sample_rate, media::ChannelLayout channel_layout);
+ // Reconfigures the capturer with new capture parameters.
+ // Must be called without holding the lock.
+ void Reconfigure(int sample_rate, media::ChannelLayout channel_layout);
// Starts recording audio.
// Triggered by AddSink() on the main render thread or a Libjingle working
@@ -126,6 +137,9 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// thread. It should NOT be called under |lock_|.
void Stop();
+ // Helper function to get the buffer size based on |peer_connection_mode_|
+ // and the sample rate.
+ int GetBufferSize(int sample_rate) const;
// Used to DCHECK that we are called on the correct thread.
base::ThreadChecker thread_checker_;
@@ -140,15 +154,20 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// The audio data source from the browser process.
scoped_refptr<media::AudioCapturerSource> source_;
- // Buffers used for temporary storage during capture callbacks.
- // Allocated during initialization.
- class ConfiguredBuffer;
- scoped_refptr<ConfiguredBuffer> buffer_;
+ // Cached audio parameters for output.
+ media::AudioParameters params_;
+
bool running_;
// True when automatic gain control is enabled, false otherwise.
bool agc_is_enabled_;
+ int render_view_id_;
+
+ // Cached value for the hardware native buffer size, used when
+ // |peer_connection_mode_| is set to false.
+ int hardware_buffer_size_;
+
// The media session ID used to identify which input device to be started by
// the browser.
int session_id_;
@@ -160,6 +179,15 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// Range is [0, 255].
int volume_;
+ // The source provider to feed the capture data to other clients like
+ // WebAudio.
+ // TODO(xians): Move the source provider to track once we don't need to feed
+ // delay, volume, key_pressed information to WebAudioCapturerSource.
+ const scoped_ptr<WebRtcLocalAudioSourceProvider> source_provider_;
+
+ // Flag which affects the buffer size used by the capturer.
+ bool peer_connection_mode_;
+
DISALLOW_COPY_AND_ASSIGN(WebRtcAudioCapturer);
};
diff --git a/content/renderer/media/webrtc_audio_device_unittest.cc b/content/renderer/media/webrtc_audio_device_unittest.cc
index ec970dc..7a194e6 100644
--- a/content/renderer/media/webrtc_audio_device_unittest.cc
+++ b/content/renderer/media/webrtc_audio_device_unittest.cc
@@ -119,7 +119,7 @@ bool CreateAndInitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
int sample_rate = hardware_config->GetInputSampleRate();
media::ChannelLayout channel_layout =
hardware_config->GetInputChannelLayout();
- if (!capturer->Initialize(kRenderViewId, channel_layout, sample_rate, 1,
+ if (!capturer->Initialize(kRenderViewId, channel_layout, sample_rate, 0, 1,
media::AudioManagerBase::kDefaultDeviceId)) {
return false;
}
@@ -137,7 +137,7 @@ scoped_refptr<WebRtcLocalAudioTrack>
CreateAndStartLocalAudioTrack(WebRtcAudioCapturer* capturer,
WebRtcAudioCapturerSink* sink) {
scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
- WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL, NULL));
+ WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL, NULL, NULL));
local_audio_track->AddSink(sink);
local_audio_track->Start();
return local_audio_track;
diff --git a/content/renderer/media/webrtc_local_audio_source_provider.cc b/content/renderer/media/webrtc_local_audio_source_provider.cc
new file mode 100644
index 0000000..cc70c78
--- /dev/null
+++ b/content/renderer/media/webrtc_local_audio_source_provider.cc
@@ -0,0 +1,155 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc_local_audio_source_provider.h"
+
+#include "base/logging.h"
+#include "content/renderer/render_thread_impl.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_fifo.h"
+#include "media/base/audio_hardware_config.h"
+#include "third_party/WebKit/public/web/WebAudioSourceProviderClient.h"
+
+using WebKit::WebVector;
+
+namespace content {
+
+static const size_t kMaxNumberOfBuffers = 10;
+
+// Size of the buffer that WebAudio processes each time, it is the same value
+// as AudioNode::ProcessingSizeInFrames in WebKit.
+// static
+const size_t WebRtcLocalAudioSourceProvider::kWebAudioRenderBufferSize = 128;
+
+WebRtcLocalAudioSourceProvider::WebRtcLocalAudioSourceProvider()
+ : audio_delay_ms_(0),
+ volume_(1),
+ key_pressed_(false),
+ is_enabled_(false) {
+}
+
+WebRtcLocalAudioSourceProvider::~WebRtcLocalAudioSourceProvider() {
+ if (audio_converter_.get())
+ audio_converter_->RemoveInput(this);
+}
+
+void WebRtcLocalAudioSourceProvider::Initialize(
+ const media::AudioParameters& source_params) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Use the native audio output hardware sample-rate for the sink.
+ if (RenderThreadImpl::current()) {
+ media::AudioHardwareConfig* hardware_config =
+ RenderThreadImpl::current()->GetAudioHardwareConfig();
+ int sample_rate = hardware_config->GetOutputSampleRate();
+ sink_params_.Reset(
+ source_params.format(), media::CHANNEL_LAYOUT_STEREO, 2, 0,
+ sample_rate, source_params.bits_per_sample(),
+ kWebAudioRenderBufferSize);
+ } else {
+ // This happens on unittests which does not have a valid RenderThreadImpl,
+ // the unittests should have injected their own |sink_params_| for testing.
+ DCHECK(sink_params_.IsValid());
+ }
+
+ base::AutoLock auto_lock(lock_);
+ source_params_ = source_params;
+ // Create the audio converter with |disable_fifo| as false so that the
+ // converter will request source_params.frames_per_buffer() each time.
+ // This will not increase the complexity as there is only one client to
+ // the converter.
+ audio_converter_.reset(
+ new media::AudioConverter(source_params, sink_params_, false));
+ audio_converter_->AddInput(this);
+ fifo_.reset(new media::AudioFifo(
+ source_params.channels(),
+ kMaxNumberOfBuffers * source_params.frames_per_buffer()));
+}
+
+void WebRtcLocalAudioSourceProvider::DeliverData(
+ media::AudioBus* audio_source,
+ int audio_delay_milliseconds,
+ int volume,
+ bool key_pressed) {
+ base::AutoLock auto_lock(lock_);
+ if (!is_enabled_)
+ return;
+
+ DCHECK(fifo_.get());
+
+ if (fifo_->frames() + audio_source->frames() <= fifo_->max_frames()) {
+ fifo_->Push(audio_source);
+ } else {
+ // This can happen if the data in the FIFO is consumed too slowly or
+ // WebAudio stops consuming data.
+ DLOG(WARNING) << "Local source provider FIFO is full: " << fifo_->frames();
+ }
+
+ // Cache the values for GetAudioProcessingParams().
+ last_fill_ = base::TimeTicks::Now();
+ audio_delay_ms_ = audio_delay_milliseconds;
+ volume_ = volume;
+ key_pressed_ = key_pressed;
+}
+
+void WebRtcLocalAudioSourceProvider::GetAudioProcessingParams(
+ int* delay_ms, int* volume, bool* key_pressed) {
+ int elapsed_ms = 0;
+ if (!last_fill_.is_null()) {
+ elapsed_ms = static_cast<int>(
+ (base::TimeTicks::Now() - last_fill_).InMilliseconds());
+ }
+ *delay_ms = audio_delay_ms_ + elapsed_ms + static_cast<int>(
+ 1000 * fifo_->frames() / source_params_.sample_rate() + 0.5);
+ *volume = volume_;
+ *key_pressed = key_pressed_;
+}
+
+void WebRtcLocalAudioSourceProvider::setClient(
+ WebKit::WebAudioSourceProviderClient* client) {
+ NOTREACHED();
+}
+
+void WebRtcLocalAudioSourceProvider::provideInput(
+ const WebVector<float*>& audio_data, size_t number_of_frames) {
+ DCHECK_EQ(number_of_frames, kWebAudioRenderBufferSize);
+ if (!bus_wrapper_ ||
+ static_cast<size_t>(bus_wrapper_->channels()) != audio_data.size()) {
+ bus_wrapper_ = media::AudioBus::CreateWrapper(audio_data.size());
+ }
+
+ bus_wrapper_->set_frames(number_of_frames);
+ for (size_t i = 0; i < audio_data.size(); ++i)
+ bus_wrapper_->SetChannelData(i, audio_data[i]);
+
+ base::AutoLock auto_lock(lock_);
+ DCHECK(audio_converter_.get());
+ DCHECK(fifo_.get());
+ is_enabled_ = true;
+ audio_converter_->Convert(bus_wrapper_.get());
+}
+
+double WebRtcLocalAudioSourceProvider::ProvideInput(
+ media::AudioBus* audio_bus, base::TimeDelta buffer_delay) {
+ if (fifo_->frames() >= audio_bus->frames()) {
+ fifo_->Consume(audio_bus, 0, audio_bus->frames());
+ } else {
+ audio_bus->Zero();
+ if (!last_fill_.is_null()) {
+ DLOG(WARNING) << "Underrun, FIFO has data " << fifo_->frames()
+ << " samples but " << audio_bus->frames()
+ << " samples are needed";
+ }
+ }
+
+ return 1.0;
+}
+
+void WebRtcLocalAudioSourceProvider::SetSinkParamsForTesting(
+ const media::AudioParameters& sink_params) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ sink_params_ = sink_params;
+}
+
+} // namespace content
diff --git a/content/renderer/media/webrtc_local_audio_source_provider.h b/content/renderer/media/webrtc_local_audio_source_provider.h
new file mode 100644
index 0000000..23ba215
--- /dev/null
+++ b/content/renderer/media/webrtc_local_audio_source_provider.h
@@ -0,0 +1,109 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_LOCAL_AUDIO_SOURCE_PROVIDER_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_LOCAL_AUDIO_SOURCE_PROVIDER_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "content/common/content_export.h"
+#include "media/base/audio_converter.h"
+#include "third_party/WebKit/public/platform/WebVector.h"
+#include "third_party/WebKit/public/web/WebAudioSourceProvider.h"
+
+namespace media {
+class AudioBus;
+class AudioConverter;
+class AudioFifo;
+class AudioParameters;
+}
+
+namespace WebKit {
+class WebAudioSourceProviderClient;
+}
+
+namespace content {
+
+// WebRtcLocalAudioSourceProvider provides a bridge between classes:
+// WebRtcAudioCapturer ---> WebKit::WebAudioSourceProvider
+//
+// WebRtcLocalAudioSourceProvider works as a sink to the WebRtcAudioCapturer
+// and stores the captured data in a FIFO. When the media stream is connected to
+// WebAudio as a source provider, WebAudio will periodically call
+// provideInput() to get the data from the FIFO.
+//
+// All calls are protected by a lock.
+class CONTENT_EXPORT WebRtcLocalAudioSourceProvider
+ : NON_EXPORTED_BASE(public media::AudioConverter::InputCallback),
+ NON_EXPORTED_BASE(public WebKit::WebAudioSourceProvider) {
+ public:
+ static const size_t kWebAudioRenderBufferSize;
+
+ WebRtcLocalAudioSourceProvider();
+ virtual ~WebRtcLocalAudioSourceProvider();
+
+ // Initialize function for the source provider. This can be called multiple
+ // times if the source format has changed.
+ void Initialize(const media::AudioParameters& source_params);
+
+ // Called by the WebRtcAudioCapturer to deliver captured data into the FIFO on
+ // the capture audio thread.
+ void DeliverData(media::AudioBus* audio_source,
+ int audio_delay_milliseconds,
+ int volume,
+ bool key_pressed);
+
+ // Called by the WebAudioCapturerSource to get the audio processing params.
+ // This function is triggered by provideInput() on the WebAudio audio thread,
+ // so it is already protected by |lock_|.
+ void GetAudioProcessingParams(int* delay_ms, int* volume, bool* key_pressed);
+
+ // WebKit::WebAudioSourceProvider implementation.
+ virtual void setClient(WebKit::WebAudioSourceProviderClient* client) OVERRIDE;
+ virtual void provideInput(const WebKit::WebVector<float*>& audio_data,
+ size_t number_of_frames) OVERRIDE;
+
+ // media::AudioConverter::InputCallback implementation.
+ // This function is triggered by provideInput() on the WebAudio audio thread,
+ // so it is already protected by |lock_|.
+ virtual double ProvideInput(media::AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) OVERRIDE;
+
+ // Method to allow the unittests to inject its own sink parameters to avoid
+ // query the hardware.
+ // TODO(xians,tommi): Remove and instead offer a way to inject the sink
+ // parameters so that the implementation doesn't rely on the global default
+ // hardware config but instead gets the parameters directly from the sink
+ // (WebAudio in this case). Ideally the unit test should be able to use that
+ // same mechanism to inject the sink parameters for testing.
+ void SetSinkParamsForTesting(const media::AudioParameters& sink_params);
+
+ private:
+ // Used to DCHECK that we are called on the correct thread.
+ base::ThreadChecker thread_checker_;
+
+ scoped_ptr<media::AudioConverter> audio_converter_;
+ scoped_ptr<media::AudioFifo> fifo_;
+ scoped_ptr<media::AudioBus> bus_wrapper_;
+ int audio_delay_ms_;
+ int volume_;
+ bool key_pressed_;
+ bool is_enabled_;
+ media::AudioParameters source_params_;
+ media::AudioParameters sink_params_;
+
+ // Protects all the member variables above.
+ base::Lock lock_;
+
+ // Time of the last DeliverData() call; used by GetAudioProcessingParams()
+ // to compute the extra delay accumulated since the FIFO was last filled.
+ base::TimeTicks last_fill_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebRtcLocalAudioSourceProvider);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_LOCAL_AUDIO_SOURCE_PROVIDER_H_
diff --git a/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc b/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc
new file mode 100644
index 0000000..c23ce0e
--- /dev/null
+++ b/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc
@@ -0,0 +1,121 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "content/renderer/media/webrtc_local_audio_source_provider.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace content {
+
+class WebRtcLocalAudioSourceProviderTest : public testing::Test {
+ protected:
+ virtual void SetUp() OVERRIDE {
+ source_params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_MONO, 1, 0, 48000, 16, 480);
+ sink_params_.Reset(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_STEREO, 2, 0, 44100, 16,
+ WebRtcLocalAudioSourceProvider::kWebAudioRenderBufferSize);
+ source_bus_ = media::AudioBus::Create(source_params_);
+ sink_bus_ = media::AudioBus::Create(sink_params_);
+ source_provider_.reset(new WebRtcLocalAudioSourceProvider());
+ source_provider_->SetSinkParamsForTesting(sink_params_);
+ source_provider_->Initialize(source_params_);
+ }
+
+ media::AudioParameters source_params_;
+ media::AudioParameters sink_params_;
+ scoped_ptr<media::AudioBus> source_bus_;
+ scoped_ptr<media::AudioBus> sink_bus_;
+ scoped_ptr<WebRtcLocalAudioSourceProvider> source_provider_;
+};
+
+TEST_F(WebRtcLocalAudioSourceProviderTest, VerifyDataFlow) {
+ // Point the WebVector into memory owned by |sink_bus_|.
+ WebKit::WebVector<float*> audio_data(
+ static_cast<size_t>(sink_bus_->channels()));
+ for (size_t i = 0; i < audio_data.size(); ++i)
+ audio_data[i] = sink_bus_->channel(i);
+
+ // Enable the |source_provider_| by asking for data. This will inject
+ // source_params_.frames_per_buffer() of zero into the resampler since there
+ // is no available data in the FIFO.
+ source_provider_->provideInput(audio_data, sink_params_.frames_per_buffer());
+ EXPECT_TRUE(sink_bus_->channel(0)[0] == 0);
+
+ // Set the value of source data to be 1.
+ for (int i = 0; i < source_params_.frames_per_buffer(); ++i) {
+ source_bus_->channel(0)[i] = 1;
+ }
+
+ // Deliver data to |source_provider_|.
+ source_provider_->DeliverData(source_bus_.get(), 0, 0, false);
+
+ // Consume the first packet in the resampler, which contains only zero.
+ // And the consumption of the data will trigger pulling the real packet from
+ // the source provider FIFO into the resampler.
+ // Note that we need to count in the provideInput() call a few lines above.
+ for (int i = sink_params_.frames_per_buffer();
+ i < source_params_.frames_per_buffer();
+ i += sink_params_.frames_per_buffer()) {
+ sink_bus_->Zero();
+ source_provider_->provideInput(audio_data,
+ sink_params_.frames_per_buffer());
+ EXPECT_DOUBLE_EQ(0.0, sink_bus_->channel(0)[0]);
+ EXPECT_DOUBLE_EQ(0.0, sink_bus_->channel(1)[0]);
+ }
+
+ // Prepare the second packet for fetching.
+ source_provider_->DeliverData(source_bus_.get(), 0, 0, false);
+
+ // Verify the packets.
+ for (int i = 0; i < source_params_.frames_per_buffer();
+ i += sink_params_.frames_per_buffer()) {
+ sink_bus_->Zero();
+ source_provider_->provideInput(audio_data,
+ sink_params_.frames_per_buffer());
+ EXPECT_GT(sink_bus_->channel(0)[0], 0);
+ EXPECT_GT(sink_bus_->channel(1)[0], 0);
+ EXPECT_DOUBLE_EQ(sink_bus_->channel(0)[0], sink_bus_->channel(1)[0]);
+ }
+}
+
+TEST_F(WebRtcLocalAudioSourceProviderTest, VerifyAudioProcessingParams) {
+ // Point the WebVector into memory owned by |sink_bus_|.
+ WebKit::WebVector<float*> audio_data(
+ static_cast<size_t>(sink_bus_->channels()));
+ for (size_t i = 0; i < audio_data.size(); ++i)
+ audio_data[i] = sink_bus_->channel(i);
+
+ // Enable the source provider.
+ source_provider_->provideInput(audio_data, sink_params_.frames_per_buffer());
+
+ // Deliver data to |source_provider_| with audio processing params.
+ int source_delay = 5;
+ int source_volume = 255;
+ bool source_key_pressed = true;
+ source_provider_->DeliverData(source_bus_.get(), source_delay,
+ source_volume, source_key_pressed);
+
+ int delay = 0, volume = 0;
+ bool key_pressed = false;
+ source_provider_->GetAudioProcessingParams(&delay, &volume, &key_pressed);
+ EXPECT_EQ(volume, source_volume);
+ EXPECT_EQ(key_pressed, source_key_pressed);
+ int expected_delay = source_delay + static_cast<int>(
+ 1000 * source_bus_->frames() / source_params_.sample_rate() + 0.5);
+ EXPECT_GE(delay, expected_delay);
+
+ // Sleep a few ms to simulate processing time. This should increase the delay
+ // value as time passes.
+ int cached_delay = delay;
+ const int kSleepMs = 10;
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(kSleepMs));
+ source_provider_->GetAudioProcessingParams(&delay, &volume, &key_pressed);
+ EXPECT_GT(delay, cached_delay);
+}
+
+} // namespace content
diff --git a/content/renderer/media/webrtc_local_audio_track.cc b/content/renderer/media/webrtc_local_audio_track.cc
index 3687b24..b0e14c4 100644
--- a/content/renderer/media/webrtc_local_audio_track.cc
+++ b/content/renderer/media/webrtc_local_audio_track.cc
@@ -4,12 +4,16 @@
#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "content/renderer/media/webaudio_capturer_source.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_audio_capturer_sink_owner.h"
+#include "content/renderer/media/webrtc_local_audio_source_provider.h"
+#include "media/base/audio_fifo.h"
#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"
namespace content {
+static const size_t kMaxNumberOfBuffersInFifo = 2;
static const char kAudioTrackKind[] = "audio";
namespace {
@@ -47,28 +51,86 @@ bool NeedsAudioProcessing(
} // namespace.
+// This is a temporary audio buffer with parameters used to send data to
+// callbacks.
+class WebRtcLocalAudioTrack::ConfiguredBuffer :
+ public base::RefCounted<WebRtcLocalAudioTrack::ConfiguredBuffer> {
+ public:
+ ConfiguredBuffer() : sink_buffer_size_(0) {}
+
+ void Initialize(const media::AudioParameters& params) {
+ DCHECK(params.IsValid());
+ params_ = params;
+
+ // Use 10ms as the sink buffer size since that is the native packet size
+ // WebRtc is running on.
+ sink_buffer_size_ = params.sample_rate() / 100;
+ audio_wrapper_ =
+ media::AudioBus::Create(params.channels(), sink_buffer_size_);
+ buffer_.reset(new int16[params.frames_per_buffer() * params.channels()]);
+
+ // The size of the FIFO should be at least twice of the source buffer size
+ // or twice of the sink buffer size.
+ int buffer_size = std::max(
+ kMaxNumberOfBuffersInFifo * params.frames_per_buffer(),
+ kMaxNumberOfBuffersInFifo * sink_buffer_size_);
+ fifo_.reset(new media::AudioFifo(params.channels(), buffer_size));
+ }
+
+ void Push(media::AudioBus* audio_source) {
+ DCHECK(fifo_->frames() + audio_source->frames() <= fifo_->max_frames());
+ fifo_->Push(audio_source);
+ }
+
+ bool Consume() {
+ if (fifo_->frames() < audio_wrapper_->frames())
+ return false;
+
+ fifo_->Consume(audio_wrapper_.get(), 0, audio_wrapper_->frames());
+ audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
+ params_.bits_per_sample() / 8,
+ buffer());
+ return true;
+ }
+
+ int16* buffer() const { return buffer_.get(); }
+ const media::AudioParameters& params() const { return params_; }
+ int sink_buffer_size() const { return sink_buffer_size_; }
+
+ private:
+ ~ConfiguredBuffer() {}
+ friend class base::RefCounted<WebRtcLocalAudioTrack::ConfiguredBuffer>;
+
+ media::AudioParameters params_;
+ scoped_ptr<media::AudioBus> audio_wrapper_;
+ scoped_ptr<media::AudioFifo> fifo_;
+ scoped_ptr<int16[]> buffer_;
+ int sink_buffer_size_;
+};
+
scoped_refptr<WebRtcLocalAudioTrack> WebRtcLocalAudioTrack::Create(
const std::string& id,
const scoped_refptr<WebRtcAudioCapturer>& capturer,
+ WebAudioCapturerSource* webaudio_source,
webrtc::AudioSourceInterface* track_source,
const webrtc::MediaConstraintsInterface* constraints) {
talk_base::RefCountedObject<WebRtcLocalAudioTrack>* track =
new talk_base::RefCountedObject<WebRtcLocalAudioTrack>(
- id, capturer, track_source, constraints);
+ id, capturer, webaudio_source, track_source, constraints);
return track;
}
WebRtcLocalAudioTrack::WebRtcLocalAudioTrack(
const std::string& label,
const scoped_refptr<WebRtcAudioCapturer>& capturer,
+ WebAudioCapturerSource* webaudio_source,
webrtc::AudioSourceInterface* track_source,
const webrtc::MediaConstraintsInterface* constraints)
: webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
capturer_(capturer),
+ webaudio_source_(webaudio_source),
track_source_(track_source),
need_audio_processing_(NeedsAudioProcessing(constraints)) {
- // The capturer with a valid device id is using microphone as source,
- // and APM (AudioProcessingModule) is turned on only for microphone data.
DCHECK(capturer.get());
DVLOG(1) << "WebRtcLocalAudioTrack::WebRtcLocalAudioTrack()";
}
@@ -80,19 +142,20 @@ WebRtcLocalAudioTrack::~WebRtcLocalAudioTrack() {
Stop();
}
-void WebRtcLocalAudioTrack::CaptureData(const int16* audio_data,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int volume,
- bool key_pressed) {
+void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
+ int audio_delay_milliseconds,
+ int volume,
+ bool key_pressed) {
scoped_refptr<WebRtcAudioCapturer> capturer;
std::vector<int> voe_channels;
int sample_rate = 0;
+ int number_of_channels = 0;
+ int number_of_frames = 0;
SinkList sinks;
+ scoped_refptr<ConfiguredBuffer> current_buffer;
{
base::AutoLock auto_lock(lock_);
- // When the track is diabled, we simply return here.
+ // When the track is disabled, we simply return here.
// TODO(xians): Figure out if we should feed zero to sinks instead, in
// order to inject VAD data in such case.
if (!enabled())
@@ -100,35 +163,62 @@ void WebRtcLocalAudioTrack::CaptureData(const int16* audio_data,
capturer = capturer_;
voe_channels = voe_channels_;
- sample_rate = params_.sample_rate(),
+ current_buffer = buffer_;
+ sample_rate = current_buffer->params().sample_rate();
+ number_of_channels = current_buffer->params().channels();
+ number_of_frames = current_buffer->sink_buffer_size();
sinks = sinks_;
}
- // Feed the data to the sinks.
- for (SinkList::const_iterator it = sinks.begin(); it != sinks.end(); ++it) {
- int new_volume = (*it)->CaptureData(voe_channels,
- audio_data,
- sample_rate,
- number_of_channels,
- number_of_frames,
- audio_delay_milliseconds,
- volume,
- need_audio_processing_,
- key_pressed);
- if (new_volume != 0 && capturer.get())
- capturer->SetVolume(new_volume);
+ // Push the data to the fifo.
+ current_buffer->Push(audio_source);
+ // Only turn off the audio processing when the constraint is set to false
+ // and there is no correct delay value.
+ bool need_audio_processing = need_audio_processing_ ?
+ need_audio_processing_ : (audio_delay_milliseconds != 0);
+ int current_volume = volume;
+ while (current_buffer->Consume()) {
+ // Feed the data to the sinks.
+ for (SinkList::const_iterator it = sinks.begin(); it != sinks.end(); ++it) {
+ int new_volume = (*it)->CaptureData(voe_channels,
+ current_buffer->buffer(),
+ sample_rate,
+ number_of_channels,
+ number_of_frames,
+ audio_delay_milliseconds,
+ current_volume,
+ need_audio_processing,
+ key_pressed);
+ if (new_volume != 0 && capturer.get()) {
+ // Feed the new volume to WebRTC while changing the volume in the
+ // browser.
+ capturer->SetVolume(new_volume);
+ current_volume = new_volume;
+ }
+ }
}
}
void WebRtcLocalAudioTrack::SetCaptureFormat(
const media::AudioParameters& params) {
- base::AutoLock auto_lock(lock_);
- params_ = params;
+ if (!params.IsValid())
+ return;
+
+ scoped_refptr<ConfiguredBuffer> new_buffer(new ConfiguredBuffer());
+ new_buffer->Initialize(params);
+
+ SinkList sinks;
+ {
+ base::AutoLock auto_lock(lock_);
+ buffer_ = new_buffer;
+ sinks = sinks_;
+ }
// Update all the existing sinks with the new format.
- for (SinkList::const_iterator it = sinks_.begin();
- it != sinks_.end(); ++it)
+ for (SinkList::const_iterator it = sinks.begin();
+ it != sinks.end(); ++it) {
(*it)->SetCaptureFormat(params);
+ }
}
void WebRtcLocalAudioTrack::AddChannel(int channel_id) {
@@ -172,7 +262,8 @@ void WebRtcLocalAudioTrack::AddSink(WebRtcAudioCapturerSink* sink) {
DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "WebRtcLocalAudioTrack::AddSink()";
base::AutoLock auto_lock(lock_);
- sink->SetCaptureFormat(params_);
+ if (buffer_.get())
+ sink->SetCaptureFormat(buffer_->params());
// Verify that |sink| is not already added to the list.
DCHECK(std::find_if(
@@ -207,8 +298,19 @@ void WebRtcLocalAudioTrack::RemoveSink(
void WebRtcLocalAudioTrack::Start() {
DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "WebRtcLocalAudioTrack::Start()";
- if (capturer_.get())
- capturer_->AddTrack(this);
+ DCHECK(capturer_.get());
+ if (webaudio_source_.get()) {
+ // If the track is hooked up with WebAudio, do NOT add the track to the
+ // capturer as its sink; otherwise two streams with different clocks
+ // would be pushed through the same track.
+ WebRtcLocalAudioSourceProvider* source_provider =
+ static_cast<WebRtcLocalAudioSourceProvider*>(
+ capturer_->audio_source_provider());
+ webaudio_source_->Start(this, source_provider);
+ return;
+ }
+
+ capturer_->AddTrack(this);
}
void WebRtcLocalAudioTrack::Stop() {
@@ -217,7 +319,15 @@ void WebRtcLocalAudioTrack::Stop() {
if (!capturer_.get())
return;
- capturer_->RemoveTrack(this);
+ if (webaudio_source_.get()) {
+ // Call Stop() on the |webaudio_source_| explicitly so that
+ // |webaudio_source_| won't push any more data to the track.
+ // Also note that the track is not registered as a sink to the |capturer_|
+ // in this case, so there is no need to call RemoveTrack().
+ webaudio_source_->Stop();
+ } else {
+ capturer_->RemoveTrack(this);
+ }
// Protect the pointers using the lock when accessing |sinks_| and
// setting the |capturer_| to NULL.
@@ -225,6 +335,7 @@ void WebRtcLocalAudioTrack::Stop() {
{
base::AutoLock auto_lock(lock_);
sinks = sinks_;
+ webaudio_source_ = NULL;
capturer_ = NULL;
}
diff --git a/content/renderer/media/webrtc_local_audio_track.h b/content/renderer/media/webrtc_local_audio_track.h
index a3b818e..01b1120 100644
--- a/content/renderer/media/webrtc_local_audio_track.h
+++ b/content/renderer/media/webrtc_local_audio_track.h
@@ -18,10 +18,15 @@
namespace cricket {
class AudioRenderer;
-}
+} // namespace cricket
+
+namespace media {
+class AudioBus;
+} // namespace media
namespace content {
+class WebAudioCapturerSource;
class WebRtcAudioCapturer;
class WebRtcAudioCapturerSinkOwner;
@@ -38,7 +43,8 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
static scoped_refptr<WebRtcLocalAudioTrack> Create(
const std::string& id,
const scoped_refptr<WebRtcAudioCapturer>& capturer,
- webrtc::AudioSourceInterface* stream_source,
+ WebAudioCapturerSource* webaudio_source,
+ webrtc::AudioSourceInterface* track_source,
const webrtc::MediaConstraintsInterface* constraints);
// Add a sink to the track. This function will trigger a SetCaptureFormat()
@@ -58,13 +64,11 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
// should be called only once when audio track going away.
void Stop();
- // Method called by the capturer to deliever the capture data.
- void CaptureData(const int16* audio_data,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int volume,
- bool key_pressed);
+ // Method called by the capturer to deliver the capture data.
+ void Capture(media::AudioBus* audio_source,
+ int audio_delay_milliseconds,
+ int volume,
+ bool key_pressed);
// Method called by the capturer to set the audio parameters used by source
// of the capture data..
@@ -72,10 +76,13 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
void SetCaptureFormat(const media::AudioParameters& params);
protected:
- WebRtcLocalAudioTrack(const std::string& label,
- const scoped_refptr<WebRtcAudioCapturer>& capturer,
- webrtc::AudioSourceInterface* track_source,
- const webrtc::MediaConstraintsInterface* constraints);
+ WebRtcLocalAudioTrack(
+ const std::string& label,
+ const scoped_refptr<WebRtcAudioCapturer>& capturer,
+ WebAudioCapturerSource* webaudio_source,
+ webrtc::AudioSourceInterface* track_source,
+ const webrtc::MediaConstraintsInterface* constraints);
+
virtual ~WebRtcLocalAudioTrack();
private:
@@ -96,6 +103,10 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
// The WebRtcAudioCapturer is today created by WebRtcAudioDeviceImpl.
scoped_refptr<WebRtcAudioCapturer> capturer_;
+ // The source of the audio track, used when the track is hooked up with
+ // WebAudio; it provides the audio data to the track in that case.
+ scoped_refptr<WebAudioCapturerSource> webaudio_source_;
+
// The source of the audio track which handles the audio constraints.
// TODO(xians): merge |track_source_| to |capturer_|.
talk_base::scoped_refptr<webrtc::AudioSourceInterface> track_source_;
@@ -106,9 +117,6 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
// Used to DCHECK that we are called on the correct thread.
base::ThreadChecker thread_checker_;
- // Cached values of the audio parameters used by the |source_| and |sinks_|.
- media::AudioParameters params_;
-
// Protects |params_| and |sinks_|.
mutable base::Lock lock_;
@@ -117,6 +125,11 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
bool need_audio_processing_;
+ // Buffers used for temporary storage during capture callbacks.
+ // Allocated during initialization.
+ class ConfiguredBuffer;
+ scoped_refptr<ConfiguredBuffer> buffer_;
+
DISALLOW_COPY_AND_ASSIGN(WebRtcLocalAudioTrack);
};
diff --git a/content/renderer/media/webrtc_local_audio_track_unittest.cc b/content/renderer/media/webrtc_local_audio_track_unittest.cc
index 7d125dc..1014ebe 100644
--- a/content/renderer/media/webrtc_local_audio_track_unittest.cc
+++ b/content/renderer/media/webrtc_local_audio_track_unittest.cc
@@ -6,6 +6,7 @@
#include "base/test/test_timeouts.h"
#include "content/renderer/media/rtc_media_constraints.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
+#include "content/renderer/media/webrtc_local_audio_source_provider.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
@@ -133,13 +134,19 @@ class MockWebRtcAudioCapturerSink : public WebRtcAudioCapturerSink {
class WebRtcLocalAudioTrackTest : public ::testing::Test {
protected:
virtual void SetUp() OVERRIDE {
+ params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_STEREO, 2, 0, 48000, 16, 480);
capturer_ = WebRtcAudioCapturer::CreateCapturer();
+ WebRtcLocalAudioSourceProvider* source_provider =
+ static_cast<WebRtcLocalAudioSourceProvider*>(
+ capturer_->audio_source_provider());
+ source_provider->SetSinkParamsForTesting(params_);
capturer_source_ = new MockCapturerSource();
EXPECT_CALL(*capturer_source_.get(), Initialize(_, capturer_.get(), 0))
.WillOnce(Return());
capturer_->SetCapturerSource(capturer_source_,
- media::CHANNEL_LAYOUT_STEREO,
- 48000);
+ params_.channel_layout(),
+ params_.sample_rate());
EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(false))
.WillOnce(Return());
@@ -154,6 +161,7 @@ class WebRtcLocalAudioTrackTest : public ::testing::Test {
audio_thread_.reset();
}
+ media::AudioParameters params_;
scoped_refptr<MockCapturerSource> capturer_source_;
scoped_refptr<WebRtcAudioCapturer> capturer_;
scoped_ptr<FakeAudioThread> audio_thread_;
@@ -167,7 +175,7 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectAndDisconnectOneSink) {
EXPECT_CALL(*capturer_source_.get(), Start()).WillOnce(Return());
RTCMediaConstraints constraints;
scoped_refptr<WebRtcLocalAudioTrack> track =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL,
+ WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
&constraints);
track->Start();
EXPECT_TRUE(track->enabled());
@@ -187,7 +195,7 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectAndDisconnectOneSink) {
CaptureData(kNumberOfNetworkChannels,
params.sample_rate(),
params.channels(),
- params.frames_per_buffer(),
+ params.sample_rate() / 100,
0,
0,
false,
@@ -213,7 +221,7 @@ TEST_F(WebRtcLocalAudioTrackTest, DISABLED_DisableEnableAudioTrack) {
EXPECT_CALL(*capturer_source_.get(), Start()).WillOnce(Return());
RTCMediaConstraints constraints;
scoped_refptr<WebRtcLocalAudioTrack> track =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL,
+ WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
&constraints);
track->Start();
static_cast<webrtc::AudioTrackInterface*>(track.get())->
@@ -229,7 +237,7 @@ TEST_F(WebRtcLocalAudioTrackTest, DISABLED_DisableEnableAudioTrack) {
CaptureData(1,
params.sample_rate(),
params.channels(),
- params.frames_per_buffer(),
+ params.sample_rate() / 100,
0,
0,
false,
@@ -242,7 +250,7 @@ TEST_F(WebRtcLocalAudioTrackTest, DISABLED_DisableEnableAudioTrack) {
CaptureData(1,
params.sample_rate(),
params.channels(),
- params.frames_per_buffer(),
+ params.sample_rate() / 100,
0,
0,
false,
@@ -263,7 +271,7 @@ TEST_F(WebRtcLocalAudioTrackTest, MultipleAudioTracks) {
EXPECT_CALL(*capturer_source_.get(), Start()).WillOnce(Return());
RTCMediaConstraints constraints;
scoped_refptr<WebRtcLocalAudioTrack> track_1 =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL,
+ WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
&constraints);
track_1->Start();
static_cast<webrtc::AudioTrackInterface*>(track_1.get())->
@@ -278,7 +286,7 @@ TEST_F(WebRtcLocalAudioTrackTest, MultipleAudioTracks) {
CaptureData(1,
params.sample_rate(),
params.channels(),
- params.frames_per_buffer(),
+ params.sample_rate() / 100,
0,
0,
false,
@@ -288,7 +296,7 @@ TEST_F(WebRtcLocalAudioTrackTest, MultipleAudioTracks) {
EXPECT_TRUE(event_1.TimedWait(TestTimeouts::tiny_timeout()));
scoped_refptr<WebRtcLocalAudioTrack> track_2 =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL,
+ WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
&constraints);
track_2->Start();
static_cast<webrtc::AudioTrackInterface*>(track_2.get())->
@@ -306,7 +314,7 @@ TEST_F(WebRtcLocalAudioTrackTest, MultipleAudioTracks) {
CaptureData(1,
params.sample_rate(),
params.channels(),
- params.frames_per_buffer(),
+ params.sample_rate() / 100,
0,
0,
false,
@@ -316,7 +324,7 @@ TEST_F(WebRtcLocalAudioTrackTest, MultipleAudioTracks) {
CaptureData(1,
params.sample_rate(),
params.channels(),
- params.frames_per_buffer(),
+ params.sample_rate() / 100,
0,
0,
false,
@@ -343,7 +351,7 @@ TEST_F(WebRtcLocalAudioTrackTest, StartOneAudioTrack) {
EXPECT_CALL(*capturer_source_.get(), Start()).Times(1);
RTCMediaConstraints constraints;
scoped_refptr<WebRtcLocalAudioTrack> track =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL,
+ WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
&constraints);
track->Start();
@@ -362,7 +370,7 @@ TEST_F(WebRtcLocalAudioTrackTest, StartAndStopAudioTracks) {
EXPECT_CALL(*capturer_source_.get(), Start()).WillOnce(SignalEvent(&event));
RTCMediaConstraints constraints;
scoped_refptr<WebRtcLocalAudioTrack> track_1 =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL,
+ WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
&constraints);
static_cast<webrtc::AudioTrackInterface*>(track_1.get())->
GetRenderer()->AddChannel(0);
@@ -382,7 +390,7 @@ TEST_F(WebRtcLocalAudioTrackTest, StartAndStopAudioTracks) {
// since it has been started.
EXPECT_CALL(*capturer_source_.get(), Start()).Times(0);
scoped_refptr<WebRtcLocalAudioTrack> track_2 =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL,
+ WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
&constraints);
track_2->Start();
static_cast<webrtc::AudioTrackInterface*>(track_2.get())->
@@ -415,7 +423,7 @@ TEST_F(WebRtcLocalAudioTrackTest, SetNewSourceForCapturerAfterStartTrack) {
EXPECT_CALL(*capturer_source_.get(), Start()).Times(1);
RTCMediaConstraints constraints;
scoped_refptr<WebRtcLocalAudioTrack> track =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL,
+ WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
&constraints);
track->Start();
@@ -427,8 +435,8 @@ TEST_F(WebRtcLocalAudioTrackTest, SetNewSourceForCapturerAfterStartTrack) {
.WillOnce(Return());
EXPECT_CALL(*new_source.get(), Start()).WillOnce(Return());
capturer_->SetCapturerSource(new_source,
- media::CHANNEL_LAYOUT_STEREO,
- 48000);
+ params_.channel_layout(),
+ params_.sample_rate());
// Stop the track.
EXPECT_CALL(*new_source.get(), Stop());
@@ -442,7 +450,7 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
EXPECT_CALL(*capturer_source_.get(), Start()).Times(1);
RTCMediaConstraints constraints;
scoped_refptr<WebRtcLocalAudioTrack> track_1 =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL,
+ WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL,
&constraints);
track_1->Start();
@@ -466,6 +474,10 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
// Create a new capturer with new source with different audio format.
scoped_refptr<WebRtcAudioCapturer> new_capturer(
WebRtcAudioCapturer::CreateCapturer());
+ WebRtcLocalAudioSourceProvider* source_provider =
+ static_cast<WebRtcLocalAudioSourceProvider*>(
+ new_capturer->audio_source_provider());
+ source_provider->SetSinkParamsForTesting(params_);
scoped_refptr<MockCapturerSource> new_source(new MockCapturerSource());
EXPECT_CALL(*new_source.get(), Initialize(_, new_capturer.get(), 0))
.WillOnce(Return());
@@ -482,7 +494,7 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
// Setup the second audio track, connect it to the new capturer and start it.
EXPECT_CALL(*new_source.get(), Start()).Times(1);
scoped_refptr<WebRtcLocalAudioTrack> track_2 =
- WebRtcLocalAudioTrack::Create(std::string(), new_capturer, NULL,
+ WebRtcLocalAudioTrack::Create(std::string(), new_capturer, NULL, NULL,
&constraints);
track_2->Start();