author    crogers@google.com <crogers@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2013-01-17 03:32:29 +0000
committer crogers@google.com <crogers@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2013-01-17 03:32:29 +0000
commit    3c89eaf563f7e5665b4b3194c3e83950eee54042 (patch)
tree      d0cfb8e1665524c72bbff03a8161c2b5e895f5d7 /content/renderer
parent    ef2f34965839c3a5560bed0cc492b55ac14d8861 (diff)
Add chromium support for MediaStreamAudioDestinationNode
We add smarts into MediaStreamDependencyFactory::CreateNativeLocalMediaStream()
to handle MediaStreams originating from WebAudio.

Please see companion WebKit patches:
https://bugs.webkit.org/show_bug.cgi?id=101815
https://bugs.webkit.org/show_bug.cgi?id=106053

BUG=none
TEST=manual test http://www.corp.google.com/~henrika/WebAudio/MediaStreamAudioDestinationNode.html

Review URL: https://chromiumcodereview.appspot.com/11369171

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@177330 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content/renderer')
-rw-r--r--  content/renderer/media/media_stream_dependency_factory.cc |  80
-rw-r--r--  content/renderer/media/media_stream_dependency_factory.h  |   4
-rw-r--r--  content/renderer/media/media_stream_impl.cc               |  49
-rw-r--r--  content/renderer/media/webaudio_capturer_source.cc        | 102
-rw-r--r--  content/renderer/media/webaudio_capturer_source.h         |  82
-rw-r--r--  content/renderer/media/webrtc_audio_capturer.cc           |  33
-rw-r--r--  content/renderer/media/webrtc_audio_capturer.h            |   4
7 files changed, 311 insertions(+), 43 deletions(-)
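For orientation, here is a minimal stand-alone sketch of the data path this patch wires up. The method names mirror the patch, but every type below is a simplified stand-in, not the real WebKit/Chromium interface: WebKit's MediaStreamAudioDestinationNode pushes rendered audio into a WebAudioDestinationConsumer, which the new WebAudioCapturerSource implements and forwards to WebRtcAudioCapturer.

#include <cstddef>
#include <cstdio>
#include <vector>

// Simplified stand-in for WebKit::WebAudioDestinationConsumer.
class WebAudioDestinationConsumer {
 public:
  virtual ~WebAudioDestinationConsumer() {}
  virtual void setFormat(size_t number_of_channels, float sample_rate) = 0;
  virtual void consumeAudio(const std::vector<const float*>& audio_data,
                            size_t number_of_frames) = 0;
};

// Simplified stand-in for WebAudioCapturerSource: receives WebAudio's
// rendered output and would forward it to WebRtcAudioCapturer.
class FakeWebAudioCapturerSource : public WebAudioDestinationConsumer {
 public:
  void setFormat(size_t channels, float sample_rate) override {
    std::printf("setFormat: %zu channel(s) @ %.0f Hz -> configure capturer\n",
                channels, sample_rate);
  }
  void consumeAudio(const std::vector<const float*>& audio_data,
                    size_t frames) override {
    std::printf("consumeAudio: %zu frames x %zu channel(s) -> WebRTC\n",
                frames, audio_data.size());
  }
};

int main() {
  FakeWebAudioCapturerSource source;
  // WebKit announces the format once, then pushes audio periodically.
  source.setFormat(2, 44100.0f);
  std::vector<float> left(128), right(128);
  std::vector<const float*> channels = {left.data(), right.data()};
  source.consumeAudio(channels, 128);
  return 0;
}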
diff --git a/content/renderer/media/media_stream_dependency_factory.cc b/content/renderer/media/media_stream_dependency_factory.cc
index 31072a2..84d59eb 100644
--- a/content/renderer/media/media_stream_dependency_factory.cc
+++ b/content/renderer/media/media_stream_dependency_factory.cc
@@ -228,35 +228,77 @@ void MediaStreamDependencyFactory::CreateNativeMediaSources(
void MediaStreamDependencyFactory::CreateNativeLocalMediaStream(
WebKit::WebMediaStreamDescriptor* description) {
- DCHECK(PeerConnectionFactoryCreated());
+ if (!EnsurePeerConnectionFactory()) {
+ DVLOG(1) << "EnsurePeerConnectionFactory() failed!";
+ return;
+ }
std::string label = UTF16ToUTF8(description->label());
scoped_refptr<webrtc::LocalMediaStreamInterface> native_stream =
CreateLocalMediaStream(label);
+ WebRtcAudioCapturer* capturer =
+ GetWebRtcAudioDevice() ? GetWebRtcAudioDevice()->capturer() : 0;
+ if (!capturer)
+ DVLOG(1) << "CreateNativeLocalMediaStream: missing WebRtcAudioCapturer.";
+
// Add audio tracks.
WebKit::WebVector<WebKit::WebMediaStreamComponent> audio_components;
description->audioSources(audio_components);
+
for (size_t i = 0; i < audio_components.size(); ++i) {
- const WebKit::WebMediaStreamSource& source = audio_components[i].source();
- MediaStreamSourceExtraData* source_data =
- static_cast<MediaStreamSourceExtraData*>(source.extraData());
- if (!source_data) {
- // TODO(perkj): Implement support for sources from remote MediaStreams.
- NOTIMPLEMENTED();
- continue;
+ WebKit::WebMediaStreamSource source = audio_components[i].source();
+
+ // See if we're adding a WebAudio MediaStream.
+ if (source.requiresAudioConsumer()) {
+ if (!webaudio_capturer_source_.get() && capturer) {
+ DCHECK(GetWebRtcAudioDevice());
+
+ // TODO(crogers, xians): In reality we should be able to send a unique
+ // audio stream to each PeerConnection separately. But currently WebRTC
+ // is only able to handle a global audio stream sent to ALL peers.
+
+ // For lifetime, we're relying on the fact that
+ // |webaudio_capturer_source_| will live longer than any
+ // MediaStreamSource, since we're never calling removeAudioConsumer().
+ webaudio_capturer_source_ = new WebAudioCapturerSource(capturer);
+ source.addAudioConsumer(webaudio_capturer_source_.get());
+
+ scoped_refptr<webrtc::LocalAudioTrackInterface> audio_track(
+ CreateLocalAudioTrack(label + "a0", NULL));
+ native_stream->AddTrack(audio_track);
+ audio_track->set_enabled(audio_components[i].isEnabled());
+ } else {
+ // TODO(crogers): This case is likely less important, but in theory we
+ // should be able to "connect" multiple WebAudio MediaStreams to a single
+ // peer, mixing their results. For now we simply ignore all but the first.
+ LOG(WARNING)
+ << "Multiple MediaStreamAudioDestinationNodes not yet supported!";
+ }
+ } else {
+ MediaStreamSourceExtraData* source_data =
+ static_cast<MediaStreamSourceExtraData*>(source.extraData());
+
+ if (!source_data) {
+ // TODO(perkj): Implement support for sources from
+ // remote MediaStreams.
+ NOTIMPLEMENTED();
+ continue;
+ }
+
+ // TODO(perkj): Refactor the creation of audio tracks to use a proper
+ // interface for receiving audio input data. Currently NULL is passed
+ // since the |audio_device| is the wrong class and is unused.
+ scoped_refptr<webrtc::LocalAudioTrackInterface> audio_track(
+ CreateLocalAudioTrack(UTF16ToUTF8(source.id()), NULL));
+ native_stream->AddTrack(audio_track);
+ audio_track->set_enabled(audio_components[i].isEnabled());
+ // TODO(xians): This sets the source of all audio tracks to the same
+ // microphone. Implement support for setting the source per audio track
+ // instead.
+ SetAudioDeviceSessionId(source_data->device_info().session_id);
}
- // TODO(perkj): Refactor the creation of audio tracks to use a proper
- // interface for receiving audio input data. Currently NULL is passed since
- // the |audio_device| is the wrong class and is unused.
- scoped_refptr<webrtc::LocalAudioTrackInterface> audio_track(
- CreateLocalAudioTrack(UTF16ToUTF8(source.id()), NULL));
- native_stream->AddTrack(audio_track);
- audio_track->set_enabled(audio_components[i].isEnabled());
- // TODO(xians): This set the source of all audio tracks to the same
- // microphone. Implement support for setting the source per audio track
- // instead.
- SetAudioDeviceSessionId(source_data->device_info().session_id);
}
// Add video tracks.
diff --git a/content/renderer/media/media_stream_dependency_factory.h b/content/renderer/media/media_stream_dependency_factory.h
index 97af73d..e03f985 100644
--- a/content/renderer/media/media_stream_dependency_factory.h
+++ b/content/renderer/media/media_stream_dependency_factory.h
@@ -12,6 +12,7 @@
#include "base/threading/thread.h"
#include "content/common/content_export.h"
#include "content/renderer/media/media_stream_extra_data.h"
+#include "content/renderer/media/webaudio_capturer_source.h"
#include "content/renderer/p2p/socket_dispatcher.h"
#include "third_party/libjingle/source/talk/app/webrtc/peerconnectioninterface.h"
#include "third_party/libjingle/source/talk/app/webrtc/videosourceinterface.h"
@@ -172,6 +173,9 @@ class CONTENT_EXPORT MediaStreamDependencyFactory
talk_base::Thread* worker_thread_;
base::Thread chrome_worker_thread_;
+ // Handles audio from MediaStreamAudioDestinationNode.
+ scoped_refptr<WebAudioCapturerSource> webaudio_capturer_source_;
+
DISALLOW_COPY_AND_ASSIGN(MediaStreamDependencyFactory);
};
diff --git a/content/renderer/media/media_stream_impl.cc b/content/renderer/media/media_stream_impl.cc
index 7e5bdf2..f0315b9 100644
--- a/content/renderer/media/media_stream_impl.cc
+++ b/content/renderer/media/media_stream_impl.cc
@@ -70,21 +70,7 @@ void UpdateOptionsIfTabMediaRequest(
// Get session ID for the selected microphone to ensure that we start
// capturing audio using the correct input device.
-static int GetSessionId(const WebKit::WebMediaStreamDescriptor& descriptor) {
- WebKit::WebVector<WebKit::WebMediaStreamComponent> audio_components;
- descriptor.audioSources(audio_components);
- if (audio_components.size() != 1) {
- // TODO(henrika): add support for more than one audio track.
- NOTIMPLEMENTED();
- return -1;
- }
-
- if (!audio_components[0].isEnabled()) {
- DVLOG(1) << "audio track is disabled";
- return -1;
- }
-
- const WebKit::WebMediaStreamSource& source = audio_components[0].source();
+static int GetSessionId(const WebKit::WebMediaStreamSource& source) {
MediaStreamSourceExtraData* source_data =
static_cast<MediaStreamSourceExtraData*>(source.extraData());
if (!source_data) {
@@ -322,10 +308,30 @@ MediaStreamImpl::GetAudioRenderer(const GURL& url) {
DVLOG(1) << "creating local audio renderer for stream:"
<< extra_data->local_stream()->label();
- // Get session ID for the local media stream.
- int session_id = GetSessionId(descriptor);
- if (session_id == -1)
+ WebKit::WebVector<WebKit::WebMediaStreamComponent> audio_components;
+ descriptor.audioSources(audio_components);
+ if (audio_components.size() != 1) {
+ // TODO(henrika): add support for more than one audio track.
+ LOG(WARNING) << "Multiple MediaStream audio tracks not supported";
+ return NULL;
+ }
+
+ if (!audio_components[0].isEnabled()) {
+ DVLOG(1) << "audio track is disabled";
return NULL;
+ }
+
+ int session_id = 0;
+ const WebKit::WebMediaStreamSource& source = audio_components[0].source();
+ if (!source.requiresAudioConsumer()) {
+ session_id = GetSessionId(source);
+ if (session_id == -1) {
+ return NULL;
+ }
+ } else {
+ DVLOG(1) << "WebAudio MediaStream is detected";
+ session_id = -1;
+ }
// Create the local audio renderer using the specified session ID.
scoped_refptr<WebRtcLocalAudioRenderer> local_renderer =
@@ -345,6 +351,8 @@ void MediaStreamImpl::OnStreamGenerated(
const StreamDeviceInfoArray& audio_array,
const StreamDeviceInfoArray& video_array) {
DCHECK(CalledOnValidThread());
+ DVLOG(1) << "MediaStreamImpl::OnStreamGenerated("
+ << request_id << "," << label << ")";
UserMediaRequestInfo* request_info = FindUserMediaRequestInfo(request_id);
if (!request_info) {
@@ -598,7 +606,6 @@ scoped_refptr<WebRtcAudioRenderer> MediaStreamImpl::CreateRemoteAudioRenderer(
scoped_refptr<WebRtcLocalAudioRenderer>
MediaStreamImpl::CreateLocalAudioRenderer(int session_id) {
- DCHECK_NE(session_id, -1);
// Ensure that the existing capturer reads data from the selected microphone.
scoped_refptr<WebRtcAudioCapturer> source =
dependency_factory_->GetWebRtcAudioDevice()->capturer();
@@ -608,7 +615,9 @@ MediaStreamImpl::CreateLocalAudioRenderer(int session_id) {
// TODO(henrika): extend support of capture sample rates.
return NULL;
}
- source->SetDevice(session_id);
+
+ if (session_id != -1)
+ source->SetDevice(session_id);
// Create a new WebRtcLocalAudioRenderer instance and connect it to the
// existing WebRtcAudioCapturer so that the renderer can use it as source.
diff --git a/content/renderer/media/webaudio_capturer_source.cc b/content/renderer/media/webaudio_capturer_source.cc
new file mode 100644
index 0000000..c201582
--- /dev/null
+++ b/content/renderer/media/webaudio_capturer_source.cc
@@ -0,0 +1,102 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webaudio_capturer_source.h"
+
+#include "base/logging.h"
+#include "content/renderer/media/webrtc_audio_capturer.h"
+
+using media::AudioBus;
+using media::AudioFifo;
+using media::AudioParameters;
+using media::ChannelLayout;
+using media::CHANNEL_LAYOUT_MONO;
+using media::CHANNEL_LAYOUT_STEREO;
+
+static const int kFifoSize = 2048;
+
+namespace content {
+
+WebAudioCapturerSource::WebAudioCapturerSource(WebRtcAudioCapturer* capturer)
+ : capturer_(capturer),
+ set_format_channels_(0),
+ callback_(0),
+ started_(false) {
+}
+
+WebAudioCapturerSource::~WebAudioCapturerSource() {
+}
+
+void WebAudioCapturerSource::setFormat(
+ size_t number_of_channels, float sample_rate) {
+ if (number_of_channels <= 2) {
+ set_format_channels_ = number_of_channels;
+ ChannelLayout channel_layout =
+ number_of_channels == 1 ? CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+ capturer_->SetCapturerSource(this, channel_layout, sample_rate);
+ capturer_->Start();
+ } else {
+ // TODO(crogers): Handle more than just the mono and stereo cases.
+ LOG(WARNING) << "WebAudioCapturerSource::setFormat() : unhandled format.";
+ }
+}
+
+void WebAudioCapturerSource::Initialize(
+ const media::AudioParameters& params,
+ media::AudioCapturerSource::CaptureCallback* callback,
+ media::AudioCapturerSource::CaptureEventHandler* event_handler) {
+ // The downstream client should be configured the same as what WebKit
+ // is feeding it.
+ DCHECK_EQ(set_format_channels_, params.channels());
+
+ base::AutoLock auto_lock(lock_);
+ params_ = params;
+ callback_ = callback;
+ wrapper_bus_ = AudioBus::CreateWrapper(params.channels());
+ capture_bus_ = AudioBus::Create(params);
+ fifo_.reset(new AudioFifo(params.channels(), kFifoSize));
+}
+
+void WebAudioCapturerSource::Start() {
+ started_ = true;
+}
+
+void WebAudioCapturerSource::Stop() {
+ started_ = false;
+}
+
+void WebAudioCapturerSource::consumeAudio(
+ const WebKit::WebVector<const float*>& audio_data,
+ size_t number_of_frames) {
+ base::AutoLock auto_lock(lock_);
+
+ if (!callback_)
+ return;
+
+ wrapper_bus_->set_frames(number_of_frames);
+
+ // Make sure WebKit is honoring what it told us up front
+ // about the channels.
+ DCHECK_EQ(set_format_channels_, static_cast<int>(audio_data.size()));
+ DCHECK_EQ(set_format_channels_, wrapper_bus_->channels());
+
+ for (size_t i = 0; i < audio_data.size(); ++i)
+ wrapper_bus_->SetChannelData(i, const_cast<float*>(audio_data[i]));
+
+ // Handle the mismatch between the WebAudio and WebRTC buffer sizes.
+ int available = fifo_->max_frames() - fifo_->frames();
+ if (available < static_cast<int>(number_of_frames)) {
+ LOG(ERROR) << "WebAudioCapturerSource::consumeAudio() : FIFO overrun.";
+ return;
+ }
+
+ fifo_->Push(wrapper_bus_.get());
+ int capture_frames = params_.frames_per_buffer();
+ while (fifo_->frames() >= capture_frames) {
+ fifo_->Consume(capture_bus_.get(), 0, capture_frames);
+ callback_->Capture(capture_bus_.get(), 0, 1.0);
+ }
+}
+
+} // namespace content
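A stand-alone sketch of the rebuffering idea in consumeAudio(), assuming WebAudio's usual 128-frame render quantum and a hypothetical 441-frame (10 ms @ 44.1 kHz) WebRTC capture buffer; std::deque stands in for media::AudioFifo, and the stream is mono for brevity:

#include <cstdio>
#include <deque>

int main() {
  const int kWebAudioQuantum = 128;  // typical WebAudio render quantum
  const int kCaptureFrames = 441;    // hypothetical 10 ms @ 44.1 kHz
  const int kFifoCapacity = 2048;    // mirrors kFifoSize in the patch

  std::deque<float> fifo;
  for (int block = 0; block < 20; ++block) {
    // Overrun check, as in consumeAudio(): drop input if it won't fit.
    if (kFifoCapacity - static_cast<int>(fifo.size()) < kWebAudioQuantum) {
      std::printf("FIFO overrun, dropping block %d\n", block);
      continue;
    }
    fifo.insert(fifo.end(), kWebAudioQuantum, 0.0f);  // push one quantum
    // Drain in capture-sized chunks, like the while-loop in the patch.
    while (static_cast<int>(fifo.size()) >= kCaptureFrames) {
      fifo.erase(fifo.begin(), fifo.begin() + kCaptureFrames);
      std::printf("block %2d: delivered %d frames downstream\n",
                  block, kCaptureFrames);
    }
  }
  return 0;
}

With kFifoSize at 2048 frames, the FIFO can absorb roughly 46 ms of audio at 44.1 kHz (2048 / 44100 s) before the overrun branch fires.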
diff --git a/content/renderer/media/webaudio_capturer_source.h b/content/renderer/media/webaudio_capturer_source.h
new file mode 100644
index 0000000..8f248bad
--- /dev/null
+++ b/content/renderer/media/webaudio_capturer_source.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBAUDIO_CAPTURER_SOURCE_H_
+#define CONTENT_RENDERER_MEDIA_WEBAUDIO_CAPTURER_SOURCE_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_capturer_source.h"
+#include "media/base/audio_fifo.h"
+#include "third_party/WebKit/Source/Platform/chromium/public/WebAudioDestinationConsumer.h"
+#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h"
+
+namespace content {
+
+class WebRtcAudioCapturer;
+
+// WebAudioCapturerSource is the missing link between
+// WebAudio's MediaStreamAudioDestinationNode and WebRtcAudioCapturer.
+//
+// 1. WebKit calls the setFormat() method setting up the basic stream format
+// (channels, and sample-rate). At this time, it dispatches this information
+// to the WebRtcAudioCapturer by calling its SetCapturerSource() method.
+// 2. Initialize() is called, where we should get back the same
+// stream format information as (1). We also get the CaptureCallback here.
+// 3. consumeAudio() is called periodically by WebKit which dispatches the
+// audio stream to the CaptureCallback::Capture() method.
+class WebAudioCapturerSource
+ : public media::AudioCapturerSource,
+ public WebKit::WebAudioDestinationConsumer {
+ public:
+ explicit WebAudioCapturerSource(WebRtcAudioCapturer* capturer);
+
+ // WebAudioDestinationConsumer implementation.
+ // setFormat() is called early on, so that we can configure the capturer.
+ virtual void setFormat(size_t number_of_channels, float sample_rate) OVERRIDE;
+ // MediaStreamAudioDestinationNode periodically calls consumeAudio().
+ virtual void consumeAudio(const WebKit::WebVector<const float*>& audio_data,
+ size_t number_of_frames) OVERRIDE;
+
+ // AudioCapturerSource implementation.
+ virtual void Initialize(
+ const media::AudioParameters& params,
+ media::AudioCapturerSource::CaptureCallback* callback,
+ media::AudioCapturerSource::CaptureEventHandler* event_handler) OVERRIDE;
+
+ virtual void Start() OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE { }
+ virtual void SetDevice(int session_id) OVERRIDE { }
+ virtual void SetAutomaticGainControl(bool enable) OVERRIDE { }
+
+ private:
+ virtual ~WebAudioCapturerSource();
+
+ WebRtcAudioCapturer* capturer_;
+
+ int set_format_channels_;
+ media::AudioParameters params_;
+ media::AudioCapturerSource::CaptureCallback* callback_;
+
+ // Wraps data coming from consumeAudio().
+ scoped_ptr<media::AudioBus> wrapper_bus_;
+
+ // Bus for reading from FIFO and calling the CaptureCallback.
+ scoped_ptr<media::AudioBus> capture_bus_;
+
+ // Handles the mismatch between the WebAudio and WebRTC buffer sizes.
+ scoped_ptr<media::AudioFifo> fifo_;
+
+ // Synchronizes consumeAudio() with AudioCapturerSource calls.
+ base::Lock lock_;
+ bool started_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebAudioCapturerSource);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBAUDIO_CAPTURER_SOURCE_H_
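One detail worth noting: |lock_| exists because consumeAudio() presumably arrives on a different (real-time audio) thread than Initialize()/Stop(). A minimal sketch of that guard pattern, using std::mutex in place of base::Lock/base::AutoLock (all names here are illustrative):

#include <mutex>

class GuardedConsumer {
 public:
  typedef void (*CaptureCallback)(int frames);

  void Initialize(CaptureCallback callback) {
    std::lock_guard<std::mutex> auto_lock(lock_);
    callback_ = callback;  // published under the lock
  }

  void consumeAudio(int frames) {
    std::lock_guard<std::mutex> auto_lock(lock_);
    if (!callback_)
      return;  // not initialized yet: drop the data, as the patch does
    callback_(frames);  // deliver while holding the lock
  }

 private:
  std::mutex lock_;
  CaptureCallback callback_ = nullptr;
};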
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc
index 7486e08..c668a97 100644
--- a/content/renderer/media/webrtc_audio_capturer.cc
+++ b/content/renderer/media/webrtc_audio_capturer.cc
@@ -100,7 +100,9 @@ void WebRtcAudioCapturer::RemoveCapturerSink(WebRtcAudioCapturerSink* sink) {
}
void WebRtcAudioCapturer::SetCapturerSource(
- const scoped_refptr<media::AudioCapturerSource>& source) {
+ const scoped_refptr<media::AudioCapturerSource>& source,
+ media::ChannelLayout channel_layout,
+ float sample_rate) {
DVLOG(1) << "SetCapturerSource()";
scoped_refptr<media::AudioCapturerSource> old_source;
{
@@ -113,9 +115,33 @@ void WebRtcAudioCapturer::SetCapturerSource(
}
// Detach the old source from normal recording.
- if (old_source)
+ if (old_source) {
old_source->Stop();
+ // Dispatch the new parameters both to the sink(s) and to the new source.
+ // The idea is to remove any dependency on the microphone parameters
+ // which would otherwise be used by default.
+
+ int buffer_size = GetBufferSizeForSampleRate(sample_rate);
+ if (!buffer_size) {
+ DLOG(ERROR) << "Unsupported sample-rate: " << sample_rate;
+ return;
+ }
+
+ params_.Reset(params_.format(),
+ channel_layout,
+ sample_rate,
+ 16, // ignored since the audio stack uses float32.
+ buffer_size);
+
+ buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]);
+
+ for (SinkList::const_iterator it = sinks_.begin();
+ it != sinks_.end(); ++it) {
+ (*it)->SetCaptureFormat(params_);
+ }
+ }
+
if (source)
source->Initialize(params_, this, this);
}
@@ -212,7 +238,8 @@ bool WebRtcAudioCapturer::Initialize() {
// Create and configure the default audio capturing source. The |source_|
// will be overwritten if the client calls SetCapturerSource().
- SetCapturerSource(AudioDeviceFactory::NewInputDevice());
+ SetCapturerSource(
+ AudioDeviceFactory::NewInputDevice(), channel_layout, sample_rate);
UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
channel_layout, media::CHANNEL_LAYOUT_MAX);
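To make the params_.Reset() and |buffer_| allocation above concrete, a worked sizing example. The 10 ms buffer at 48 kHz is an assumption for illustration (GetBufferSizeForSampleRate()'s actual table is not part of this diff); the point is that |buffer_| holds one capture buffer of interleaved int16 samples:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical values: stereo at 48 kHz with an assumed 10 ms buffer.
  const int channels = 2;             // CHANNEL_LAYOUT_STEREO
  const int frames_per_buffer = 480;  // assume 10 ms @ 48 kHz
  const size_t samples = static_cast<size_t>(frames_per_buffer) * channels;
  std::printf("buffer_: %zu int16 samples (%zu bytes) per capture cycle\n",
              samples, samples * sizeof(int16_t));
  // Prints: buffer_: 960 int16 samples (1920 bytes) per capture cycle
  return 0;
}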
diff --git a/content/renderer/media/webrtc_audio_capturer.h b/content/renderer/media/webrtc_audio_capturer.h
index a3a22b1..bfd0442 100644
--- a/content/renderer/media/webrtc_audio_capturer.h
+++ b/content/renderer/media/webrtc_audio_capturer.h
@@ -58,7 +58,9 @@ class WebRtcAudioCapturer
// provide their own captured audio data. Client is responsible for calling
// Start() on its own source to get the ball rolling.
void SetCapturerSource(
- const scoped_refptr<media::AudioCapturerSource>& source);
+ const scoped_refptr<media::AudioCapturerSource>& source,
+ media::ChannelLayout channel_layout,
+ float sample_rate);
// The |on_device_stopped_cb| callback will be called in OnDeviceStopped().
void SetStopCallback(const base::Closure& on_device_stopped_cb);