summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorxians@chromium.org <xians@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-01-16 16:03:04 +0000
committerxians@chromium.org <xians@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-01-16 16:03:04 +0000
commit8f53b2764181d98b6e83513bcce8b6c4769c43eb (patch)
tree826bab4610826f135f059ac09ff0e898ce8d7533
parent9f0dcd4e2bff29c194036a4fad568d6c71f31000 (diff)
downloadchromium_src-8f53b2764181d98b6e83513bcce8b6c4769c43eb.zip
chromium_src-8f53b2764181d98b6e83513bcce8b6c4769c43eb.tar.gz
chromium_src-8f53b2764181d98b6e83513bcce8b6c4769c43eb.tar.bz2
Cleaned up the WebRtcAudioCapturer a bit.
This CL cleaned up the capturer a bit by moving the initialize() method to private and do NOT allow re-initializing the capturer when a new getUserMedia is called. Instead, it creates a new capturer for each getUserMedia. BUG=333285 TEST=content_unittests Review URL: https://codereview.chromium.org/133903004 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@245205 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--content/browser/media/webrtc_browsertest.cc12
-rw-r--r--content/renderer/media/media_stream_dependency_factory.cc48
-rw-r--r--content/renderer/media/media_stream_dependency_factory.h2
-rw-r--r--content/renderer/media/media_stream_impl.cc13
-rw-r--r--content/renderer/media/mock_media_stream_dependency_factory.cc12
-rw-r--r--content/renderer/media/mock_media_stream_dependency_factory.h2
-rw-r--r--content/renderer/media/rtc_peer_connection_handler.cc18
-rw-r--r--content/renderer/media/webrtc_audio_capturer.cc178
-rw-r--r--content/renderer/media/webrtc_audio_capturer.h101
-rw-r--r--content/renderer/media/webrtc_audio_capturer_unittest.cc18
-rw-r--r--content/renderer/media/webrtc_audio_device_impl.cc39
-rw-r--r--content/renderer/media/webrtc_audio_device_impl.h25
-rw-r--r--content/renderer/media/webrtc_audio_device_unittest.cc63
-rw-r--r--content/renderer/media/webrtc_local_audio_track_unittest.cc84
-rw-r--r--content/test/data/media/getusermedia.html59
15 files changed, 352 insertions, 322 deletions
diff --git a/content/browser/media/webrtc_browsertest.cc b/content/browser/media/webrtc_browsertest.cc
index 2c3e348..824c0c3 100644
--- a/content/browser/media/webrtc_browsertest.cc
+++ b/content/browser/media/webrtc_browsertest.cc
@@ -267,6 +267,18 @@ IN_PROC_BROWSER_TEST_F(WebrtcBrowserTest, GetAudioAndVideoStreamAndClone) {
ExpectTitle("OK");
}
+IN_PROC_BROWSER_TEST_F(WebrtcBrowserTest, TwoGetUserMediaAndStop) {
+ ASSERT_TRUE(embedded_test_server()->InitializeAndWaitUntilReady());
+
+ GURL url(embedded_test_server()->GetURL("/media/getusermedia.html"));
+ NavigateToURL(shell(), url);
+
+ ASSERT_TRUE(ExecuteJavascript(
+ "twoGetUserMediaAndStop({video: true, audio: true});"));
+
+ ExpectTitle("OK");
+}
+
IN_PROC_BROWSER_TEST_F(WebrtcBrowserTest, GetUserMediaWithMandatorySourceID_1) {
ASSERT_TRUE(embedded_test_server()->InitializeAndWaitUntilReady());
diff --git a/content/renderer/media/media_stream_dependency_factory.cc b/content/renderer/media/media_stream_dependency_factory.cc
index 31e8814..a55ed36 100644
--- a/content/renderer/media/media_stream_dependency_factory.cc
+++ b/content/renderer/media/media_stream_dependency_factory.cc
@@ -324,8 +324,7 @@ void MediaStreamDependencyFactory::CreateNativeMediaSources(
}
scoped_refptr<WebRtcAudioCapturer> capturer(
- MaybeCreateAudioCapturer(render_view_id, device_info,
- audio_constraints));
+ CreateAudioCapturer(render_view_id, device_info, audio_constraints));
if (!capturer.get()) {
DLOG(WARNING) << "Failed to create the capturer for device "
<< device_info.device.id;
@@ -420,14 +419,9 @@ MediaStreamDependencyFactory::CreateNativeAudioMediaStreamTrack(
}
}
- std::string track_id = base::UTF16ToUTF8(track.id());
- scoped_refptr<WebRtcAudioCapturer> capturer;
- if (GetWebRtcAudioDevice())
- capturer = GetWebRtcAudioDevice()->GetDefaultCapturer();
-
scoped_refptr<webrtc::AudioTrackInterface> audio_track(
- CreateLocalAudioTrack(track_id,
- capturer,
+ CreateLocalAudioTrack(track.id().utf8(),
+ source_data->GetAudioCapturer(),
webaudio_source.get(),
source_data->local_audio_source()));
AddNativeTrackToBlinkTrack(audio_track.get(), track, true);
@@ -894,7 +888,7 @@ void MediaStreamDependencyFactory::CleanupPeerConnectionFactory() {
}
scoped_refptr<WebRtcAudioCapturer>
-MediaStreamDependencyFactory::MaybeCreateAudioCapturer(
+MediaStreamDependencyFactory::CreateAudioCapturer(
int render_view_id,
const StreamDeviceInfo& device_info,
const blink::WebMediaConstraints& constraints) {
@@ -902,37 +896,9 @@ MediaStreamDependencyFactory::MaybeCreateAudioCapturer(
// view, for example, by an extension.
DCHECK_GE(render_view_id, 0);
- scoped_refptr<WebRtcAudioCapturer> capturer =
- GetWebRtcAudioDevice()->GetDefaultCapturer();
-
- // If the default capturer does not exist or |render_view_id| == -1, create
- // a new capturer.
- bool is_new_capturer = false;
- if (!capturer.get()) {
- capturer = WebRtcAudioCapturer::CreateCapturer();
- is_new_capturer = true;
- }
-
- if (!capturer->Initialize(
- render_view_id,
- static_cast<media::ChannelLayout>(
- device_info.device.input.channel_layout),
- device_info.device.input.sample_rate,
- device_info.device.input.frames_per_buffer,
- device_info.session_id,
- device_info.device.id,
- device_info.device.matched_output.sample_rate,
- device_info.device.matched_output.frames_per_buffer,
- device_info.device.input.effects,
- constraints)) {
- return NULL;
- }
-
- // Add the capturer to the WebRtcAudioDeviceImpl if it is a new capturer.
- if (is_new_capturer)
- GetWebRtcAudioDevice()->AddAudioCapturer(capturer);
-
- return capturer;
+ return WebRtcAudioCapturer::CreateCapturer(render_view_id, device_info,
+ constraints,
+ GetWebRtcAudioDevice());
}
void MediaStreamDependencyFactory::AddNativeTrackToBlinkTrack(
diff --git a/content/renderer/media/media_stream_dependency_factory.h b/content/renderer/media/media_stream_dependency_factory.h
index 0a0b120..22bbc54 100644
--- a/content/renderer/media/media_stream_dependency_factory.h
+++ b/content/renderer/media/media_stream_dependency_factory.h
@@ -218,7 +218,7 @@ class CONTENT_EXPORT MediaStreamDependencyFactory
// Creates a new capturer for the given |render_view_id| and |device_info|.
// A new capturer is created for each getUserMedia call; existing capturers
// are not reused. Returns NULL if the capturer fails to initialize.
- virtual scoped_refptr<WebRtcAudioCapturer> MaybeCreateAudioCapturer(
+ virtual scoped_refptr<WebRtcAudioCapturer> CreateAudioCapturer(
int render_view_id, const StreamDeviceInfo& device_info,
const blink::WebMediaConstraints& constraints);
diff --git a/content/renderer/media/media_stream_impl.cc b/content/renderer/media/media_stream_impl.cc
index f659aaf..f50432c 100644
--- a/content/renderer/media/media_stream_impl.cc
+++ b/content/renderer/media/media_stream_impl.cc
@@ -728,9 +728,8 @@ void MediaStreamImpl::StopLocalSource(
<< "{device_id = " << extra_data->device_info().device.id << "})";
if (source.type() == blink::WebMediaStreamSource::TypeAudio) {
- if (extra_data->GetAudioCapturer()) {
+ if (extra_data->GetAudioCapturer())
extra_data->GetAudioCapturer()->Stop();
- }
}
if (notify_dispatcher)
@@ -801,19 +800,13 @@ bool MediaStreamImpl::GetAuthorizedDeviceInfoForAudioRenderer(
int* output_sample_rate,
int* output_frames_per_buffer) {
DCHECK(CalledOnValidThread());
-
WebRtcAudioDeviceImpl* audio_device =
dependency_factory_->GetWebRtcAudioDevice();
if (!audio_device)
return false;
- if (!audio_device->GetDefaultCapturer())
- return false;
-
- return audio_device->GetDefaultCapturer()->GetPairedOutputParameters(
- session_id,
- output_sample_rate,
- output_frames_per_buffer);
+ return audio_device->GetAuthorizedDeviceInfoForAudioRenderer(
+ session_id, output_sample_rate, output_frames_per_buffer);
}
MediaStreamSourceExtraData::MediaStreamSourceExtraData(
diff --git a/content/renderer/media/mock_media_stream_dependency_factory.cc b/content/renderer/media/mock_media_stream_dependency_factory.cc
index 8b5ee8b..bdcb69a 100644
--- a/content/renderer/media/mock_media_stream_dependency_factory.cc
+++ b/content/renderer/media/mock_media_stream_dependency_factory.cc
@@ -480,9 +480,12 @@ MockMediaStreamDependencyFactory::CreateLocalAudioTrack(
WebAudioCapturerSource* webaudio_source,
webrtc::AudioSourceInterface* source) {
DCHECK(mock_pc_factory_created_);
- DCHECK(!capturer.get());
+ blink::WebMediaConstraints constraints;
+ scoped_refptr<WebRtcAudioCapturer> audio_capturer = capturer ?
+ capturer : WebRtcAudioCapturer::CreateCapturer(-1, StreamDeviceInfo(),
+ constraints, NULL);
return WebRtcLocalAudioTrack::Create(
- id, WebRtcAudioCapturer::CreateCapturer(), webaudio_source, source);
+ id, audio_capturer, webaudio_source, source);
}
SessionDescriptionInterface*
@@ -502,10 +505,11 @@ MockMediaStreamDependencyFactory::CreateIceCandidate(
}
scoped_refptr<WebRtcAudioCapturer>
-MockMediaStreamDependencyFactory::MaybeCreateAudioCapturer(
+MockMediaStreamDependencyFactory::CreateAudioCapturer(
int render_view_id, const StreamDeviceInfo& device_info,
const blink::WebMediaConstraints& constraints) {
- return WebRtcAudioCapturer::CreateCapturer();
+ return WebRtcAudioCapturer::CreateCapturer(-1, device_info,
+ constraints, NULL);
}
} // namespace content
diff --git a/content/renderer/media/mock_media_stream_dependency_factory.h b/content/renderer/media/mock_media_stream_dependency_factory.h
index 2ef8b59..f9f3c33 100644
--- a/content/renderer/media/mock_media_stream_dependency_factory.h
+++ b/content/renderer/media/mock_media_stream_dependency_factory.h
@@ -184,7 +184,7 @@ class MockMediaStreamDependencyFactory : public MediaStreamDependencyFactory {
virtual bool EnsurePeerConnectionFactory() OVERRIDE;
virtual bool PeerConnectionFactoryCreated() OVERRIDE;
- virtual scoped_refptr<WebRtcAudioCapturer> MaybeCreateAudioCapturer(
+ virtual scoped_refptr<WebRtcAudioCapturer> CreateAudioCapturer(
int render_view_id, const StreamDeviceInfo& device_info,
const blink::WebMediaConstraints& constraints) OVERRIDE;
diff --git a/content/renderer/media/rtc_peer_connection_handler.cc b/content/renderer/media/rtc_peer_connection_handler.cc
index 08f71c0..6548faa 100644
--- a/content/renderer/media/rtc_peer_connection_handler.cc
+++ b/content/renderer/media/rtc_peer_connection_handler.cc
@@ -15,6 +15,7 @@
#include "base/strings/utf_string_conversions.h"
#include "content/public/common/content_switches.h"
#include "content/renderer/media/media_stream_dependency_factory.h"
+#include "content/renderer/media/media_stream_source_extra_data.h"
#include "content/renderer/media/peer_connection_tracker.h"
#include "content/renderer/media/remote_media_stream_impl.h"
#include "content/renderer/media/rtc_data_channel_handler.h"
@@ -549,13 +550,16 @@ bool RTCPeerConnectionHandler::addStream(
this, stream, PeerConnectionTracker::SOURCE_LOCAL);
// A media stream is connected to a peer connection, enable the
- // peer connection mode for the capturer.
- WebRtcAudioDeviceImpl* audio_device =
- dependency_factory_->GetWebRtcAudioDevice();
- if (audio_device) {
- WebRtcAudioCapturer* capturer = audio_device->GetDefaultCapturer();
- if (capturer)
- capturer->EnablePeerConnectionMode();
+ // peer connection mode for the sources.
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ stream.audioTracks(audio_tracks);
+ for (size_t i = 0; i < audio_tracks.size(); ++i) {
+ const blink::WebMediaStreamSource& source = audio_tracks[i].source();
+ MediaStreamSourceExtraData* extra_data =
+ static_cast<MediaStreamSourceExtraData*>(source.extraData());
+ // |extra_data| is NULL if the track is a remote audio track.
+ if (extra_data && extra_data->GetAudioCapturer())
+ extra_data->GetAudioCapturer()->EnablePeerConnectionMode();
}
return AddStream(stream, &constraints);
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc
index 617db97..ff4ecc7 100644
--- a/content/renderer/media/webrtc_audio_capturer.cc
+++ b/content/renderer/media/webrtc_audio_capturer.cc
@@ -113,55 +113,47 @@ class WebRtcAudioCapturer::TrackOwner
};
// static
-scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() {
- scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer();
- return capturer;
+scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer(
+ int render_view_id, const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ WebRtcAudioDeviceImpl* audio_device) {
+ scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(
+ render_view_id, device_info, constraints, audio_device);
+ if (capturer->Initialize())
+ return capturer;
+
+ return NULL;
}
-bool WebRtcAudioCapturer::Initialize(int render_view_id,
- media::ChannelLayout channel_layout,
- int sample_rate,
- int buffer_size,
- int session_id,
- const std::string& device_id,
- int paired_output_sample_rate,
- int paired_output_frames_per_buffer,
- int effects,
- const blink::WebMediaConstraints& constraints) {
+bool WebRtcAudioCapturer::Initialize() {
DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
-
- DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
- UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
- channel_layout, media::CHANNEL_LAYOUT_MAX);
-
WebRtcLogMessage(base::StringPrintf(
"WAC::Initialize. render_view_id=%d"
", channel_layout=%d, sample_rate=%d, buffer_size=%d"
", session_id=%d, paired_output_sample_rate=%d"
- ", paired_output_frames_per_buffer=%d",
- render_view_id,
- channel_layout,
- sample_rate,
- buffer_size,
- session_id,
- paired_output_sample_rate,
- paired_output_frames_per_buffer));
-
- render_view_id_ = render_view_id;
- session_id_ = session_id;
- device_id_ = device_id;
- hardware_buffer_size_ = buffer_size;
- output_sample_rate_ = paired_output_sample_rate;
- output_frames_per_buffer_= paired_output_frames_per_buffer;
- constraints_ = constraints;
-
- if (render_view_id == -1) {
- // Return true here to allow injecting a new source via SetCapturerSource()
- // at a later state.
+ ", paired_output_frames_per_buffer=%d, effects=%d. ",
+ render_view_id_,
+ device_info_.device.input.channel_layout,
+ device_info_.device.input.sample_rate,
+ device_info_.device.input.frames_per_buffer,
+ device_info_.session_id,
+ device_info_.device.matched_output.sample_rate,
+ device_info_.device.matched_output.frames_per_buffer,
+ device_info_.device.input.effects));
+
+ if (render_view_id_ == -1) {
+ // Return true here to allow injecting a new source via
+ // SetCapturerSourceForTesting() at a later state.
return true;
}
+ media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
+ device_info_.device.input.channel_layout);
+ DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
+ UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
+ channel_layout, media::CHANNEL_LAYOUT_MAX);
+
// Verify that the reported input channel configuration is supported.
if (channel_layout != media::CHANNEL_LAYOUT_MONO &&
channel_layout != media::CHANNEL_LAYOUT_STEREO) {
@@ -170,48 +162,58 @@ bool WebRtcAudioCapturer::Initialize(int render_view_id,
return false;
}
- DVLOG(1) << "Audio input hardware sample rate: " << sample_rate;
- media::AudioSampleRate asr = media::AsAudioSampleRate(sample_rate);
+ DVLOG(1) << "Audio input hardware sample rate: "
+ << device_info_.device.input.sample_rate;
+ media::AudioSampleRate asr = media::AsAudioSampleRate(
+ device_info_.device.input.sample_rate);
if (asr != media::kUnexpectedAudioSampleRate) {
UMA_HISTOGRAM_ENUMERATION(
"WebRTC.AudioInputSampleRate", asr, media::kUnexpectedAudioSampleRate);
} else {
- UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", sample_rate);
+ UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected",
+ device_info_.device.input.sample_rate);
}
// Verify that the reported input hardware sample rate is supported
// on the current platform.
if (std::find(&kValidInputRates[0],
&kValidInputRates[0] + arraysize(kValidInputRates),
- sample_rate) ==
+ device_info_.device.input.sample_rate) ==
&kValidInputRates[arraysize(kValidInputRates)]) {
- DLOG(ERROR) << sample_rate << " is not a supported input rate.";
+ DLOG(ERROR) << device_info_.device.input.sample_rate
+ << " is not a supported input rate.";
return false;
}
- // Create and configure the default audio capturing source. The |source_|
- // will be overwritten if an external client later calls SetCapturerSource()
- // providing an alternative media::AudioCapturerSource.
- SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
+ // Create and configure the default audio capturing source.
+ SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id_),
channel_layout,
- static_cast<float>(sample_rate),
- effects,
- constraints);
+ static_cast<float>(device_info_.device.input.sample_rate),
+ device_info_.device.input.effects,
+ constraints_);
+
+ // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware
+ // information from the capturer.
+ if (audio_device_)
+ audio_device_->AddAudioCapturer(this);
return true;
}
-WebRtcAudioCapturer::WebRtcAudioCapturer()
- : running_(false),
- render_view_id_(-1),
- hardware_buffer_size_(0),
- session_id_(0),
+WebRtcAudioCapturer::WebRtcAudioCapturer(
+ int render_view_id,
+ const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ WebRtcAudioDeviceImpl* audio_device)
+ : constraints_(constraints),
+ running_(false),
+ render_view_id_(render_view_id),
+ device_info_(device_info),
volume_(0),
peer_connection_mode_(false),
- output_sample_rate_(0),
- output_frames_per_buffer_(0),
key_pressed_(false),
- need_audio_processing_(false) {
+ need_audio_processing_(false),
+ audio_device_(audio_device) {
DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
}
@@ -245,26 +247,16 @@ void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) {
void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
DCHECK(thread_checker_.CalledOnValidThread());
+ base::AutoLock auto_lock(lock_);
- bool stop_source = false;
- {
- base::AutoLock auto_lock(lock_);
-
- scoped_refptr<TrackOwner> removed_item =
- tracks_.Remove(TrackOwner::TrackWrapper(track));
-
- // Clear the delegate to ensure that no more capture callbacks will
- // be sent to this sink. Also avoids a possible crash which can happen
- // if this method is called while capturing is active.
- if (removed_item.get())
- removed_item->Reset();
-
- // Stop the source if the last audio track is going away.
- stop_source = tracks_.IsEmpty();
- }
+ scoped_refptr<TrackOwner> removed_item =
+ tracks_.Remove(TrackOwner::TrackWrapper(track));
- if (stop_source)
- Stop();
+ // Clear the delegate to ensure that no more capture callbacks will
+ // be sent to this sink. Also avoids a possible crash which can happen
+ // if this method is called while capturing is active.
+ if (removed_item.get())
+ removed_item->Reset();
}
void WebRtcAudioCapturer::SetCapturerSource(
@@ -316,7 +308,7 @@ void WebRtcAudioCapturer::SetCapturerSource(
}
if (source.get())
- source->Initialize(params, this, session_id_);
+ source->Initialize(params, this, session_id());
if (restart_source)
Start();
@@ -386,6 +378,10 @@ void WebRtcAudioCapturer::Stop() {
running_ = false;
}
+ // Remove the capturer object from the WebRtcAudioDeviceImpl.
+ if (audio_device_)
+ audio_device_->RemoveAudioCapturer(this);
+
for (TrackList::ItemList::const_iterator it = tracks.begin();
it != tracks.end();
++it) {
@@ -510,12 +506,15 @@ bool WebRtcAudioCapturer::GetPairedOutputParameters(
int* output_sample_rate,
int* output_frames_per_buffer) const {
// Don't set output parameters unless all of them are valid.
- if (session_id_ <= 0 || !output_sample_rate_ || !output_frames_per_buffer_)
+ if (device_info_.session_id <= 0 ||
+ !device_info_.device.matched_output.sample_rate ||
+ !device_info_.device.matched_output.frames_per_buffer)
return false;
- *session_id = session_id_;
- *output_sample_rate = output_sample_rate_;
- *output_frames_per_buffer = output_frames_per_buffer_;
+ *session_id = device_info_.session_id;
+ *output_sample_rate = device_info_.device.matched_output.sample_rate;
+ *output_frames_per_buffer =
+ device_info_.device.matched_output.frames_per_buffer;
return true;
}
@@ -534,9 +533,10 @@ int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const {
// Use the native hardware buffer size in non peer connection mode when the
// platform is using a native buffer size smaller than the PeerConnection
// buffer size.
- if (!peer_connection_mode_ && hardware_buffer_size_ &&
- hardware_buffer_size_ <= peer_connection_buffer_size) {
- return hardware_buffer_size_;
+ int hardware_buffer_size = device_info_.device.input.frames_per_buffer;
+ if (!peer_connection_mode_ && hardware_buffer_size &&
+ hardware_buffer_size <= peer_connection_buffer_size) {
+ return hardware_buffer_size;
}
return (sample_rate / 100);
@@ -571,4 +571,14 @@ void WebRtcAudioCapturer::FeedRenderDataToAudioProcessor(
render_delay);
}
+void WebRtcAudioCapturer::SetCapturerSourceForTesting(
+ const scoped_refptr<media::AudioCapturerSource>& source,
+ media::AudioParameters params) {
+ // Create a new audio stream as source which uses the new source.
+ SetCapturerSource(source, params.channel_layout(),
+ static_cast<float>(params.sample_rate()),
+ params.effects(),
+ constraints_);
+}
+
} // namespace content
diff --git a/content/renderer/media/webrtc_audio_capturer.h b/content/renderer/media/webrtc_audio_capturer.h
index 1763a37..986016d 100644
--- a/content/renderer/media/webrtc_audio_capturer.h
+++ b/content/renderer/media/webrtc_audio_capturer.h
@@ -13,8 +13,8 @@
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
+#include "content/common/media/media_stream_options.h"
#include "content/renderer/media/tagged_list.h"
-#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "media/audio/audio_input_device.h"
#include "media/base/audio_capturer_source.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
@@ -26,13 +26,12 @@ class AudioBus;
namespace content {
class MediaStreamAudioProcessor;
+class WebRtcAudioDeviceImpl;
class WebRtcLocalAudioRenderer;
class WebRtcLocalAudioTrack;
// This class manages the capture data flow by getting data from its
// |source_|, and passing it to its |tracks_|.
-// It allows clients to inject their own capture data source by calling
-// SetCapturerSource().
// The threading model for this class is rather complex since it will be
// created on the main render thread, captured data is provided on a dedicated
// AudioInputDevice thread, and methods can be called either on the Libjingle
@@ -42,35 +41,28 @@ class CONTENT_EXPORT WebRtcAudioCapturer
: public base::RefCountedThreadSafe<WebRtcAudioCapturer>,
NON_EXPORTED_BASE(public media::AudioCapturerSource::CaptureCallback) {
public:
- // Use to construct the audio capturer.
+ // Used to construct the audio capturer. |render_view_id| specifies the
+ // render view consuming audio for capture. Passing -1 as |render_view_id|
+ // is used by the unittests to skip creating a source via
+ // AudioDeviceFactory::NewInputDevice(), allowing them to inject their own
+ // source via SetCapturerSourceForTesting() at a later stage. |device_info|
+ // contains all the device information that the capturer is created for.
+ // |constraints| contains the settings for audio processing.
+ // TODO(xians): Implement the interface for the audio source and move the
+ // |constraints| to ApplyConstraints().
// Called on the main render thread.
- static scoped_refptr<WebRtcAudioCapturer> CreateCapturer();
+ static scoped_refptr<WebRtcAudioCapturer> CreateCapturer(
+ int render_view_id,
+ const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ WebRtcAudioDeviceImpl* audio_device);
- // Creates and configures the default audio capturing source using the
- // provided audio parameters. |render_view_id| specifies the render view
- // consuming audio for capture. |session_id| is passed to the browser to
- // decide which device to use. |device_id| is used to identify which device
- // the capturer is created for. Called on the main render thread.
- // TODO(xians): Implement the interface for the audio source and move the
- // |constraints| to AddTrack().
- bool Initialize(int render_view_id,
- media::ChannelLayout channel_layout,
- int sample_rate,
- int buffer_size,
- int session_id,
- const std::string& device_id,
- int paired_output_sample_rate,
- int paired_output_frames_per_buffer,
- int effects,
- const blink::WebMediaConstraints& constraints);
// Add an audio track to the sinks of the capturer.
// WebRtcAudioDeviceImpl calls this method on the main render thread but
// other clients may call it from other threads. The current implementation
// does not support multi-thread calling.
// The first AddTrack will implicitly trigger the Start() of this object.
- // Called on the main render thread or libjingle working thread.
- // TODO(xians): Pass the track constraints via AddTrack().
void AddTrack(WebRtcLocalAudioTrack* track);
// Remove an audio track from the sinks of the capturer.
@@ -79,17 +71,6 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// Called on the main render thread or libjingle working thread.
void RemoveTrack(WebRtcLocalAudioTrack* track);
- // SetCapturerSource() is called if the client on the source side desires to
- // provide their own captured audio data. Client is responsible for calling
- // Start() on its own source to have the ball rolling.
- // Called on the main render thread.
- void SetCapturerSource(
- const scoped_refptr<media::AudioCapturerSource>& source,
- media::ChannelLayout channel_layout,
- float sample_rate,
- int effects,
- const blink::WebMediaConstraints& constraints);
-
// Called when a stream is connecting to a peer connection. This will set
// up the native buffer size for the stream in order to optimize the
// performance for peer connection.
@@ -101,7 +82,6 @@ class CONTENT_EXPORT WebRtcAudioCapturer
int Volume() const;
int MaxVolume() const;
- bool is_recording() const { return running_; }
// Audio parameters utilized by the source of the audio capturer.
// TODO(phoglund): Think over the implications of this accessor and if we can
@@ -114,8 +94,8 @@ class CONTENT_EXPORT WebRtcAudioCapturer
int* output_sample_rate,
int* output_frames_per_buffer) const;
- const std::string& device_id() const { return device_id_; }
- int session_id() const { return session_id_; }
+ const std::string& device_id() const { return device_info_.device.id; }
+ int session_id() const { return device_info_.session_id; }
// Stops recording audio. This method will empty its track lists since
// stopping the capturer will implicitly invalidate all its tracks.
@@ -137,15 +117,24 @@ class CONTENT_EXPORT WebRtcAudioCapturer
int number_of_frames,
base::TimeDelta render_delay);
+ // Used by the unittests to inject their own source into the capturer.
+ void SetCapturerSourceForTesting(
+ const scoped_refptr<media::AudioCapturerSource>& source,
+ media::AudioParameters params);
+
protected:
friend class base::RefCountedThreadSafe<WebRtcAudioCapturer>;
- WebRtcAudioCapturer();
virtual ~WebRtcAudioCapturer();
private:
class TrackOwner;
typedef TaggedList<TrackOwner> TrackList;
+ WebRtcAudioCapturer(int render_view_id,
+ const StreamDeviceInfo& device_info,
+ const blink::WebMediaConstraints& constraints,
+ WebRtcAudioDeviceImpl* audio_device);
+
// AudioCapturerSource::CaptureCallback implementation.
// Called on the AudioInputDevice audio thread.
virtual void Capture(media::AudioBus* audio_source,
@@ -154,6 +143,21 @@ class CONTENT_EXPORT WebRtcAudioCapturer
bool key_pressed) OVERRIDE;
virtual void OnCaptureError() OVERRIDE;
+ // Initializes the default audio capturing source using the provided render
+ // view id and device information. Return true if success, otherwise false.
+ bool Initialize();
+
+ // SetCapturerSource() is called if the client on the source side desires to
+ // provide their own captured audio data. Client is responsible for calling
+ // Start() on its own source to have the ball rolling.
+ // Called on the main render thread.
+ void SetCapturerSource(
+ const scoped_refptr<media::AudioCapturerSource>& source,
+ media::ChannelLayout channel_layout,
+ float sample_rate,
+ int effects,
+ const blink::WebMediaConstraints& constraints);
+
// Starts recording audio.
// Triggered by AddSink() on the main render thread or a Libjingle working
// thread. It should NOT be called under |lock_|.
@@ -189,16 +193,8 @@ class CONTENT_EXPORT WebRtcAudioCapturer
int render_view_id_;
- // Cached value for the hardware native buffer size, used when
- // |peer_connection_mode_| is set to false.
- int hardware_buffer_size_;
-
- // The media session ID used to identify which input device to be started by
- // the browser.
- int session_id_;
-
- // The device this capturer is given permission to use.
- std::string device_id_;
+ // Cached information of the device used by the capturer.
+ const StreamDeviceInfo device_info_;
// Stores latest microphone volume received in a CaptureData() callback.
// Range is [0, 255].
@@ -207,9 +203,6 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// Flag which affects the buffer size used by the capturer.
bool peer_connection_mode_;
- int output_sample_rate_;
- int output_frames_per_buffer_;
-
// Cache value for the audio processing params.
base::TimeDelta audio_delay_;
bool key_pressed_;
@@ -217,6 +210,10 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// Flag to help deciding if the data needs audio processing.
bool need_audio_processing_;
+ // Raw pointer to the WebRtcAudioDeviceImpl, which is valid for the lifetime
+ // of RenderThread.
+ WebRtcAudioDeviceImpl* audio_device_;
+
DISALLOW_COPY_AND_ASSIGN(WebRtcAudioCapturer);
};
diff --git a/content/renderer/media/webrtc_audio_capturer_unittest.cc b/content/renderer/media/webrtc_audio_capturer_unittest.cc
index 5d9d195..d227186 100644
--- a/content/renderer/media/webrtc_audio_capturer_unittest.cc
+++ b/content/renderer/media/webrtc_audio_capturer_unittest.cc
@@ -62,17 +62,17 @@ class WebRtcAudioCapturerTest : public testing::Test {
: params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::CHANNEL_LAYOUT_STEREO, 48000, 16, 128) {
#endif
- capturer_ = WebRtcAudioCapturer::CreateCapturer();
blink::WebMediaConstraints constraints;
- capturer_->Initialize(-1, params_.channel_layout(), params_.sample_rate(),
- params_.frames_per_buffer(), 0, std::string(), 0, 0,
- params_.effects(), constraints);
+ capturer_ = WebRtcAudioCapturer::CreateCapturer(
+ -1, StreamDeviceInfo(MEDIA_DEVICE_AUDIO_CAPTURE,
+ "", "", params_.sample_rate(),
+ params_.channel_layout(),
+ params_.frames_per_buffer()),
+ constraints,
+ NULL);
capturer_source_ = new MockCapturerSource();
- EXPECT_CALL(*capturer_source_.get(), Initialize(_, capturer_.get(), 0));
- capturer_->SetCapturerSource(capturer_source_,
- params_.channel_layout(),
- params_.sample_rate(),
- params_.effects(), constraints);
+ EXPECT_CALL(*capturer_source_.get(), Initialize(_, capturer_.get(), -1));
+ capturer_->SetCapturerSourceForTesting(capturer_source_, params_);
EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
EXPECT_CALL(*capturer_source_.get(), Start());
diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc
index 9dd9556..0a34cd3 100644
--- a/content/renderer/media/webrtc_audio_device_impl.cc
+++ b/content/renderer/media/webrtc_audio_device_impl.cc
@@ -436,24 +436,47 @@ void WebRtcAudioDeviceImpl::AddAudioCapturer(
DVLOG(1) << "WebRtcAudioDeviceImpl::AddAudioCapturer()";
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(capturer.get());
-
- // We only support one microphone today, which means the list can contain
- // only one capturer with a valid device id.
- DCHECK(capturer->device_id().empty() || !GetDefaultCapturer());
+ DCHECK(!capturer->device_id().empty());
base::AutoLock auto_lock(lock_);
+ DCHECK(std::find(capturers_.begin(), capturers_.end(), capturer) ==
+ capturers_.end());
capturers_.push_back(capturer);
}
+void WebRtcAudioDeviceImpl::RemoveAudioCapturer(
+ const scoped_refptr<WebRtcAudioCapturer>& capturer) {
+ DVLOG(1) << "WebRtcAudioDeviceImpl::RemoveAudioCapturer()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(capturer.get());
+ base::AutoLock auto_lock(lock_);
+ capturers_.remove(capturer);
+}
+
scoped_refptr<WebRtcAudioCapturer>
WebRtcAudioDeviceImpl::GetDefaultCapturer() const {
base::AutoLock auto_lock(lock_);
- for (CapturerList::const_iterator iter = capturers_.begin();
- iter != capturers_.end(); ++iter) {
- if (!(*iter)->device_id().empty())
- return *iter;
+ // Use the last |capturer| which is from the latest getUserMedia call as
+ // the default capture device.
+ for (CapturerList::const_reverse_iterator iter = capturers_.rbegin();
+ iter != capturers_.rend(); ++iter) {
+ return *iter;
}
return NULL;
}
+bool WebRtcAudioDeviceImpl::GetAuthorizedDeviceInfoForAudioRenderer(
+ int* session_id,
+ int* output_sample_rate,
+ int* output_frames_per_buffer) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // If there is no capturer, or if there is more than one open capture
+ // device, return false.
+ if (capturers_.empty() || capturers_.size() > 1)
+ return false;
+
+ return GetDefaultCapturer()->GetPairedOutputParameters(
+ session_id, output_sample_rate, output_frames_per_buffer);
+}
+
} // namespace content
diff --git a/content/renderer/media/webrtc_audio_device_impl.h b/content/renderer/media/webrtc_audio_device_impl.h
index 1d904fa..f9279f5 100644
--- a/content/renderer/media/webrtc_audio_device_impl.h
+++ b/content/renderer/media/webrtc_audio_device_impl.h
@@ -299,14 +299,21 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
// Called on the main renderer thread.
bool SetAudioRenderer(WebRtcAudioRenderer* renderer);
- // Adds the capturer to the ADM.
+ // Adds/Removes the capturer to the ADM.
+ // TODO(xians): Remove these two methods once the ADM does not need to pass
+ // hardware information up to WebRtc.
void AddAudioCapturer(const scoped_refptr<WebRtcAudioCapturer>& capturer);
-
- // Gets the default capturer, which is the capturer in the list with
- // a valid |device_id|. Microphones are represented by capturers with a valid
- // |device_id|, since only one microphone is supported today, only one
- // capturer in the |capturers_| can have a valid |device_id|.
- scoped_refptr<WebRtcAudioCapturer> GetDefaultCapturer() const;
+ void RemoveAudioCapturer(const scoped_refptr<WebRtcAudioCapturer>& capturer);
+
+ // Gets paired device information of the capture device for the audio
+ // renderer. This is used to pass on a session id, sample rate and buffer
+ // size to a webrtc audio renderer (either local or remote), so that audio
+ // will be rendered to a matching output device.
+ // Returns true if the capture device has a paired output device, otherwise
+ // false. Note that if there is more than one open capture device, the
+ // function cannot pick an appropriate device and returns false.
+ bool GetAuthorizedDeviceInfoForAudioRenderer(
+ int* session_id, int* output_sample_rate, int* output_frames_per_buffer);
const scoped_refptr<WebRtcAudioRenderer>& renderer() const {
return renderer_;
@@ -355,6 +362,10 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE;
virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE;
+ // Helper to get the default capturer, which is the last capturer in
+ // |capturers_|.
+ scoped_refptr<WebRtcAudioCapturer> GetDefaultCapturer() const;
+
// Used to DCHECK that we are called on the correct thread.
base::ThreadChecker thread_checker_;
diff --git a/content/renderer/media/webrtc_audio_device_unittest.cc b/content/renderer/media/webrtc_audio_device_unittest.cc
index 4845ebb..5545ec7 100644
--- a/content/renderer/media/webrtc_audio_device_unittest.cc
+++ b/content/renderer/media/webrtc_audio_device_unittest.cc
@@ -104,33 +104,28 @@ bool HardwareSampleRatesAreValid() {
return true;
}
-// Utility method which creates and initializes the audio capturer and adds it
-// to WebRTC audio device. This method should be used in tests where
+// Utility method which creates the audio capturer, it returns a scoped
+// reference of the capturer if it is created successfully, otherwise it returns
+// NULL. This method should be used in tests where
// HardwareSampleRatesAreValid() has been called and returned true.
-bool CreateAndInitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
- DCHECK(webrtc_audio_device);
- scoped_refptr<WebRtcAudioCapturer> capturer(
- WebRtcAudioCapturer::CreateCapturer());
-
+scoped_refptr<WebRtcAudioCapturer> CreateAudioCapturer(
+ WebRtcAudioDeviceImpl* webrtc_audio_device) {
media::AudioHardwareConfig* hardware_config =
RenderThreadImpl::current()->GetAudioHardwareConfig();
-
// Use native capture sample rate and channel configuration to get some
// action in this test.
int sample_rate = hardware_config->GetInputSampleRate();
media::ChannelLayout channel_layout =
hardware_config->GetInputChannelLayout();
blink::WebMediaConstraints constraints;
- if (!capturer->Initialize(kRenderViewId, channel_layout, sample_rate, 0, 1,
- media::AudioManagerBase::kDefaultDeviceId, 0, 0,
- media::AudioParameters::NO_EFFECTS, constraints)) {
- return false;
- }
-
- // Add the capturer to the WebRtcAudioDeviceImpl.
- webrtc_audio_device->AddAudioCapturer(capturer);
-
- return true;
+ StreamDeviceInfo device(MEDIA_DEVICE_AUDIO_CAPTURE,
+ media::AudioManagerBase::kDefaultDeviceName,
+ media::AudioManagerBase::kDefaultDeviceId,
+ sample_rate, channel_layout, 0);
+ device.session_id = 1;
+ return WebRtcAudioCapturer::CreateCapturer(kRenderViewId, device,
+ constraints,
+ webrtc_audio_device);
}
// Create and start a local audio track. Starting the audio track will connect
@@ -490,7 +485,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, Construct) {
ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
int err = base->Init(webrtc_audio_device.get());
- EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
+ EXPECT_TRUE(CreateAudioCapturer(webrtc_audio_device) != NULL);
EXPECT_EQ(0, err);
EXPECT_EQ(0, base->Terminate());
}
@@ -639,15 +634,15 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
EXPECT_EQ(0, base->StartSend(ch));
- // Create and initialize the capturer which starts the source of the data
- // flow.
- EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
+ // Create the capturer which starts the source of the data flow.
+ scoped_refptr<WebRtcAudioCapturer> capturer(
+ CreateAudioCapturer(webrtc_audio_device));
+ EXPECT_TRUE(capturer);
// Create and start a local audio track which is bridging the data flow
// between the capturer and WebRtcAudioDeviceImpl.
scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
- CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(),
- webrtc_audio_device));
+ CreateAndStartLocalAudioTrack(capturer, webrtc_audio_device));
// connect the VoE voice channel to the audio track
static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
GetRenderer()->AddChannel(ch);
@@ -667,7 +662,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
ch, webrtc::kRecordingPerChannel));
EXPECT_EQ(0, base->StopSend(ch));
- webrtc_audio_device->GetDefaultCapturer()->Stop();
+ capturer->Stop();
EXPECT_EQ(0, base->DeleteChannel(ch));
EXPECT_EQ(0, base->Terminate());
}
@@ -796,10 +791,11 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
int ch = base->CreateChannel();
EXPECT_NE(-1, ch);
- EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
+ scoped_refptr<WebRtcAudioCapturer> capturer(
+ CreateAudioCapturer(webrtc_audio_device));
+ EXPECT_TRUE(capturer);
scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
- CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(),
- webrtc_audio_device));
+ CreateAndStartLocalAudioTrack(capturer, webrtc_audio_device));
// connect the VoE voice channel to the audio track
static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
GetRenderer()->AddChannel(ch);
@@ -825,7 +821,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
base::TimeDelta::FromSeconds(2));
message_loop_.Run();
- webrtc_audio_device->GetDefaultCapturer()->Stop();
+ capturer->Stop();
proxy->Stop();
EXPECT_EQ(0, base->StopSend(ch));
EXPECT_EQ(0, base->StopPlayout(ch));
@@ -862,7 +858,9 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
int ch = base->CreateChannel();
EXPECT_NE(-1, ch);
- EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
+ scoped_refptr<WebRtcAudioCapturer> capturer(
+ CreateAudioCapturer(webrtc_audio_device));
+ EXPECT_TRUE(capturer);
base::WaitableEvent event(false, false);
scoped_ptr<MockMediaStreamAudioSink> sink(
new MockMediaStreamAudioSink(&event));
@@ -870,8 +868,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
// Create and start a local audio track. Starting the audio track will connect
// the audio track to the capturer and also start the source of the capturer.
scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
- CreateAndStartLocalAudioTrack(
- webrtc_audio_device->GetDefaultCapturer().get(), sink.get()));
+ CreateAndStartLocalAudioTrack(capturer, sink.get()));
// connect the VoE voice channel to the audio track.
static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
@@ -884,7 +881,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
int delay = (base::Time::Now() - start_time).InMilliseconds();
PrintPerfResultMs("webrtc_recording_setup_c", "t", delay);
- webrtc_audio_device->GetDefaultCapturer()->Stop();
+ capturer->Stop();
EXPECT_EQ(0, base->StopSend(ch));
EXPECT_EQ(0, base->DeleteChannel(ch));
EXPECT_EQ(0, base->Terminate());
diff --git a/content/renderer/media/webrtc_local_audio_track_unittest.cc b/content/renderer/media/webrtc_local_audio_track_unittest.cc
index abd47d1..ea3c3bb 100644
--- a/content/renderer/media/webrtc_local_audio_track_unittest.cc
+++ b/content/renderer/media/webrtc_local_audio_track_unittest.cc
@@ -162,16 +162,13 @@ class WebRtcLocalAudioTrackTest : public ::testing::Test {
virtual void SetUp() OVERRIDE {
params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::CHANNEL_LAYOUT_STEREO, 2, 0, 48000, 16, 480);
- capturer_ = WebRtcAudioCapturer::CreateCapturer();
+ blink::WebMediaConstraints constraints;
+ capturer_ = WebRtcAudioCapturer::CreateCapturer(-1, StreamDeviceInfo(),
+ constraints, NULL);
capturer_source_ = new MockCapturerSource(capturer_.get());
- EXPECT_CALL(*capturer_source_.get(), OnInitialize(_, capturer_.get(), 0))
+ EXPECT_CALL(*capturer_source_.get(), OnInitialize(_, capturer_.get(), -1))
.WillOnce(Return());
- blink::WebMediaConstraints constraints;
- capturer_->SetCapturerSource(capturer_source_,
- params_.channel_layout(),
- params_.sample_rate(),
- params_.effects(),
- constraints);
+ capturer_->SetCapturerSourceForTesting(capturer_source_, params_);
}
media::AudioParameters params_;
@@ -428,37 +425,6 @@ TEST_F(WebRtcLocalAudioTrackTest, StartAndStopAudioTracks) {
capturer_->Stop();
}
-// Set new source to the existing capturer.
-TEST_F(WebRtcLocalAudioTrackTest, SetNewSourceForCapturerAfterStartTrack) {
- // Setup the audio track and start the track.
- EXPECT_CALL(*capturer_source_.get(), SetAutomaticGainControl(true));
- EXPECT_CALL(*capturer_source_.get(), OnStart());
- scoped_refptr<WebRtcLocalAudioTrack> track =
- WebRtcLocalAudioTrack::Create(std::string(), capturer_, NULL, NULL);
- static_cast<WebRtcLocalAudioSourceProvider*>(
- track->audio_source_provider())->SetSinkParamsForTesting(params_);
- track->Start();
-
- // Setting new source to the capturer and the track should still get packets.
- scoped_refptr<MockCapturerSource> new_source(
- new MockCapturerSource(capturer_.get()));
- EXPECT_CALL(*capturer_source_.get(), OnStop());
- EXPECT_CALL(*new_source.get(), SetAutomaticGainControl(true));
- EXPECT_CALL(*new_source.get(), OnInitialize(_, capturer_.get(), 0))
- .WillOnce(Return());
- EXPECT_CALL(*new_source.get(), OnStart());
- blink::WebMediaConstraints constraints;
- capturer_->SetCapturerSource(new_source,
- params_.channel_layout(),
- params_.sample_rate(),
- params_.effects(),
- constraints);
-
- // Stop the track.
- EXPECT_CALL(*new_source.get(), OnStop());
- capturer_->Stop();
-}
-
// Create a new capturer with new source, connect it to a new audio track.
TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
// Setup the first audio track and start it.
@@ -487,17 +453,17 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
track_1->AddSink(sink_1.get());
// Create a new capturer with new source with different audio format.
+ blink::WebMediaConstraints constraints;
scoped_refptr<WebRtcAudioCapturer> new_capturer(
- WebRtcAudioCapturer::CreateCapturer());
+ WebRtcAudioCapturer::CreateCapturer(-1, StreamDeviceInfo(),
+ constraints, NULL));
scoped_refptr<MockCapturerSource> new_source(
new MockCapturerSource(new_capturer.get()));
- EXPECT_CALL(*new_source.get(), OnInitialize(_, new_capturer.get(), 0));
- blink::WebMediaConstraints constraints;
- new_capturer->SetCapturerSource(new_source,
- media::CHANNEL_LAYOUT_MONO,
- 44100,
- media::AudioParameters::NO_EFFECTS,
- constraints);
+ EXPECT_CALL(*new_source.get(), OnInitialize(_, new_capturer.get(), -1));
+ media::AudioParameters new_param(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_MONO, 44100, 16, 441);
+ new_capturer->SetCapturerSourceForTesting(new_source, new_param);
// Setup the second audio track, connect it to the new capturer and start it.
EXPECT_CALL(*new_source.get(), SetAutomaticGainControl(true));
@@ -520,7 +486,8 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
EXPECT_CALL(
*sink_2,
CaptureData(
- kNumberOfNetworkChannelsForTrack2, 44100, 1, _, 0, 0, true, false))
+ kNumberOfNetworkChannelsForTrack2, new_param.sample_rate(),
+ new_param.channels(), _, 0, 0, true, false))
.Times(AnyNumber()).WillRepeatedly(Return());
EXPECT_CALL(*sink_2, OnSetFormat(_)).WillOnce(SignalEvent(&event));
track_2->AddSink(sink_2.get());
@@ -547,19 +514,20 @@ TEST_F(WebRtcLocalAudioTrackTest, TrackWorkWithSmallBufferSize) {
media::CHANNEL_LAYOUT_STEREO, 48000, 16, 128);
// Create a capturer with new source which works with the format above.
+ blink::WebMediaConstraints constraints;
scoped_refptr<WebRtcAudioCapturer> capturer(
- WebRtcAudioCapturer::CreateCapturer());
+ WebRtcAudioCapturer::CreateCapturer(
+ -1,
+ StreamDeviceInfo(MEDIA_DEVICE_AUDIO_CAPTURE,
+ "", "", params.sample_rate(),
+ params.channel_layout(),
+ params.frames_per_buffer()),
+ constraints,
+ NULL));
scoped_refptr<MockCapturerSource> source(
new MockCapturerSource(capturer.get()));
- blink::WebMediaConstraints constraints;
- capturer->Initialize(-1, params.channel_layout(), params.sample_rate(),
- params.frames_per_buffer(), 0, std::string(), 0, 0,
- params.effects(), constraints);
-
- EXPECT_CALL(*source.get(), OnInitialize(_, capturer.get(), 0));
- capturer->SetCapturerSource(source, params.channel_layout(),
- params.sample_rate(), params.effects(),
- constraints);
+ EXPECT_CALL(*source.get(), OnInitialize(_, capturer.get(), -1));
+ capturer->SetCapturerSourceForTesting(source, params);
// Setup a audio track, connect it to the capturer and start it.
EXPECT_CALL(*source.get(), SetAutomaticGainControl(true));
diff --git a/content/test/data/media/getusermedia.html b/content/test/data/media/getusermedia.html
index 92037cf..7481613 100644
--- a/content/test/data/media/getusermedia.html
+++ b/content/test/data/media/getusermedia.html
@@ -64,25 +64,61 @@
createAndRenderClone, failedCallback);
}
+ // Creates two MediaStreams and renders them locally. When the video of both
+ // streams are detected to be rolling, we stop the local stream. Since both
+ // streams have the same source, both video streams should stop. If they do,
+ // the test succeeds.
+ function twoGetUserMediaAndStop(constraints) {
+ document.title = 'Calling Two GetUserMedia';
+ navigator.webkitGetUserMedia(
+ constraints,
+ function(stream) {
+ displayAndDetectVideo(stream, requestSecondGetUserMedia);
+ },
+ failedCallback);
+ var requestSecondGetUserMedia = function() {
+ navigator.webkitGetUserMedia(
+ constraints,
+ function(stream) {
+ displayIntoVideoElement(stream,
+ stopStreamAndVerifyAllLocalViewsDontPlayVideo, 'local-view-2');
+ },
+ failedCallback);
+ };
+
+ var stopStreamAndVerifyAllLocalViewsDontPlayVideo = function() {
+ gLocalStream.getVideoTracks()[0].stop();
+
+ // Since local-view and local-view-2 are playing the video from the same
+ // source, both of them should stop.
+ waitForVideoToStop('local-view');
+ waitForVideoToStop('local-view-2');
+ };
+ }
+
function failedCallback(error) {
document.title = 'GetUserMedia call failed with code ' + error.code;
sendValueToTest(document.title);
}
- function plugStreamIntoLocalView(stream) {
+ function plugStreamIntoVideoElement(stream, videoElement) {
gLocalStream = stream;
var localStreamUrl = URL.createObjectURL(stream);
- $('local-view').src = localStreamUrl;
+ $(videoElement).src = localStreamUrl;
}
- function displayAndDetectVideo(stream, callback) {
- plugStreamIntoLocalView(stream);
+ function displayIntoVideoElement(stream, callback, videoElement) {
+ plugStreamIntoVideoElement(stream, videoElement);
document.title = 'Waiting for video...';
- detectVideoPlaying('local-view', callback);
+ detectVideoPlaying(videoElement, callback);
+ }
+
+ function displayAndDetectVideo(stream, callback) {
+ displayIntoVideoElement(stream, callback, 'local-view');
}
function displayDetectAndAnalyzeVideo(stream) {
- plugStreamIntoLocalView(stream);
+ plugStreamIntoVideoElement(stream, 'local-view');
analyzeVideo();
}
@@ -134,10 +170,19 @@
<tr>
<td><video width="320" height="240" id="local-view"
autoplay="autoplay"></video></td>
- <!-- Canvases are named after their corresponding video elements. -->
<td><canvas width="320" height="240" id="local-view-canvas"
style="display:none"></canvas></td>
</tr>
+ <tr>
+ <td>Local Preview 2</td>
+ </tr>
+ <tr>
+ <td><video width="320" height="240" id="local-view-2"
+ autoplay="autoplay"></video></td>
+ <!-- Canvases are named after their corresponding video elements. -->
+ <td><canvas width="320" height="240" id="local-view-2-canvas"
+ style="display:none"></canvas></td>
+ </tr>
</table>
</body>
</html>