summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authortommi@chromium.org <tommi@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-09-10 02:18:26 +0000
committertommi@chromium.org <tommi@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-09-10 02:18:26 +0000
commit80c1b5a735a0a49d467980ec40fa74385f0479b0 (patch)
treecaf75063c4d5b249c2b3ac886cd83ecf628d0d76
parent482fe239c2fe16c17e0b97833e53a72718e79671 (diff)
downloadchromium_src-80c1b5a735a0a49d467980ec40fa74385f0479b0.zip
chromium_src-80c1b5a735a0a49d467980ec40fa74385f0479b0.tar.gz
chromium_src-80c1b5a735a0a49d467980ec40fa74385f0479b0.tar.bz2
Implicit audio output device selection for getUserMedia.
When a non-default input device is selected, do a best-effort selection of a matching output device. This is used to switch output of media stream audio tracks to the output device that matches the currently open capture device (microphone). A typical example is to support switching to USB headsets when in an audio/video call. This does not affect the audio output of non-webrtc related audio elements and only happens when there's exactly 1 active audio capture device in the page. BUG=276894 Review URL: https://chromiumcodereview.appspot.com/23731007 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@222187 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--content/browser/renderer_host/media/audio_input_device_manager.cc32
-rw-r--r--content/browser/renderer_host/media/audio_input_device_manager_unittest.cc3
-rw-r--r--content/browser/renderer_host/media/audio_renderer_host.cc21
-rw-r--r--content/browser/renderer_host/media/media_stream_manager.cc8
-rw-r--r--content/common/media/media_stream_messages.h9
-rw-r--r--content/common/media/media_stream_options.cc11
-rw-r--r--content/common/media/media_stream_options.h2
-rw-r--r--content/public/common/media_stream_request.cc10
-rw-r--r--content/public/common/media_stream_request.h58
-rw-r--r--content/renderer/media/media_stream_dependency_factory.cc6
-rw-r--r--content/renderer/media/media_stream_dependency_factory_unittest.cc2
-rw-r--r--content/renderer/media/media_stream_impl.cc76
-rw-r--r--content/renderer/media/media_stream_impl.h10
-rw-r--r--content/renderer/media/webrtc_audio_device_unittest.cc18
-rw-r--r--content/renderer/media/webrtc_audio_renderer.cc30
-rw-r--r--content/renderer/media/webrtc_audio_renderer.h11
-rw-r--r--content/renderer/media/webrtc_local_audio_renderer.cc22
-rw-r--r--content/renderer/media/webrtc_local_audio_renderer.h14
-rw-r--r--content/test/webrtc_audio_device_test.cc12
-rw-r--r--content/test/webrtc_audio_device_test.h4
20 files changed, 280 insertions, 79 deletions
diff --git a/content/browser/renderer_host/media/audio_input_device_manager.cc b/content/browser/renderer_host/media/audio_input_device_manager.cc
index b495956..50bb3ec 100644
--- a/content/browser/renderer_host/media/audio_input_device_manager.cc
+++ b/content/browser/renderer_host/media/audio_input_device_manager.cc
@@ -34,7 +34,8 @@ AudioInputDeviceManager::AudioInputDeviceManager(
StreamDeviceInfo fake_device(MEDIA_DEVICE_AUDIO_CAPTURE,
media::AudioManagerBase::kDefaultDeviceName,
media::AudioManagerBase::kDefaultDeviceId,
- 44100, media::CHANNEL_LAYOUT_STEREO, false);
+ 44100, media::CHANNEL_LAYOUT_STEREO,
+ 0, false);
fake_device.session_id = kFakeOpenSessionId;
devices_.push_back(fake_device);
}
@@ -169,19 +170,37 @@ void AudioInputDeviceManager::OpenOnDeviceThread(
DCHECK(IsOnDeviceThread());
StreamDeviceInfo out(info.device.type, info.device.name, info.device.id,
- 0, 0, false);
+ 0, 0, 0, false);
out.session_id = session_id;
+
+ MediaStreamDevice::AudioDeviceParameters& input_params = out.device.input;
+
if (use_fake_device_) {
// Don't need to query the hardware information if using fake device.
- out.device.sample_rate = 44100;
- out.device.channel_layout = media::CHANNEL_LAYOUT_STEREO;
+ input_params.sample_rate = 44100;
+ input_params.channel_layout = media::CHANNEL_LAYOUT_STEREO;
} else {
// Get the preferred sample rate and channel configuration for the
// audio device.
media::AudioParameters params =
audio_manager_->GetInputStreamParameters(info.device.id);
- out.device.sample_rate = params.sample_rate();
- out.device.channel_layout = params.channel_layout();
+ input_params.sample_rate = params.sample_rate();
+ input_params.channel_layout = params.channel_layout();
+ input_params.frames_per_buffer = params.frames_per_buffer();
+
+ // Add preferred output device information if a matching output device
+ // exists.
+ out.device.matched_output_device_id =
+ audio_manager_->GetAssociatedOutputDeviceID(info.device.id);
+ if (!out.device.matched_output_device_id.empty()) {
+ params = audio_manager_->GetOutputStreamParameters(
+ out.device.matched_output_device_id);
+ MediaStreamDevice::AudioDeviceParameters& matched_output_params =
+ out.device.matched_output;
+ matched_output_params.sample_rate = params.sample_rate();
+ matched_output_params.channel_layout = params.channel_layout();
+ matched_output_params.frames_per_buffer = params.frames_per_buffer();
+ }
}
// Return the |session_id| through the listener by posting a task on
@@ -206,6 +225,7 @@ void AudioInputDeviceManager::OpenedOnIOThread(int session_id,
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
DCHECK_EQ(session_id, info.session_id);
DCHECK(GetDevice(session_id) == devices_.end());
+
devices_.push_back(info);
if (listener_)
diff --git a/content/browser/renderer_host/media/audio_input_device_manager_unittest.cc b/content/browser/renderer_host/media/audio_input_device_manager_unittest.cc
index 03b31d2..25d8a17 100644
--- a/content/browser/renderer_host/media/audio_input_device_manager_unittest.cc
+++ b/content/browser/renderer_host/media/audio_input_device_manager_unittest.cc
@@ -177,7 +177,8 @@ TEST_F(AudioInputDeviceManagerTest, OpenNotExistingDevice) {
int sample_rate(0);
int channel_config(0);
StreamDeviceInfo dummy_device(
- stream_type, device_name, device_id, sample_rate, channel_config, false);
+ stream_type, device_name, device_id, sample_rate, channel_config, 2048,
+ false);
int session_id = manager_->Open(dummy_device);
EXPECT_CALL(*audio_input_listener_,
diff --git a/content/browser/renderer_host/media/audio_renderer_host.cc b/content/browser/renderer_host/media/audio_renderer_host.cc
index 1e1065a..c09dc6c 100644
--- a/content/browser/renderer_host/media/audio_renderer_host.cc
+++ b/content/browser/renderer_host/media/audio_renderer_host.cc
@@ -36,6 +36,7 @@ class AudioRendererHost::AudioEntry
int stream_id,
int render_view_id,
const media::AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id,
scoped_ptr<base::SharedMemory> shared_memory,
scoped_ptr<media::AudioOutputController::SyncReader> reader);
@@ -88,6 +89,7 @@ class AudioRendererHost::AudioEntry
AudioRendererHost::AudioEntry::AudioEntry(
AudioRendererHost* host, int stream_id, int render_view_id,
const media::AudioParameters& params,
+ const std::string& output_device_id,
const std::string& input_device_id,
scoped_ptr<base::SharedMemory> shared_memory,
scoped_ptr<media::AudioOutputController::SyncReader> reader)
@@ -95,8 +97,7 @@ AudioRendererHost::AudioEntry::AudioEntry(
stream_id_(stream_id),
render_view_id_(render_view_id),
controller_(media::AudioOutputController::Create(
- // TODO(tommi): Feed in the proper output device id.
- host->audio_manager_, this, params, std::string(),
+ host->audio_manager_, this, params, output_device_id,
input_device_id, reader.get())),
shared_memory_(shared_memory.Pass()),
reader_(reader.Pass()) {
@@ -303,10 +304,16 @@ void AudioRendererHost::OnCreateStream(
// When the |input_channels| is valid, clients are trying to create a unified
// IO stream which opens an input device mapping to the |session_id|.
- std::string input_device_id;
+ // Initialize the |output_device_id| to an empty string which indicates that
+ // the default device should be used. If a StreamDeviceInfo instance was found
+ // though, then we use the matched output device.
+ std::string input_device_id, output_device_id;
+ const StreamDeviceInfo* info = media_stream_manager_->
+ audio_input_device_manager()->GetOpenedDeviceInfoById(session_id);
+ if (info)
+ output_device_id = info->device.matched_output_device_id;
+
if (input_channels > 0) {
- const StreamDeviceInfo* info = media_stream_manager_->
- audio_input_device_manager()->GetOpenedDeviceInfoById(session_id);
if (!info) {
SendErrorMessage(stream_id);
DLOG(WARNING) << "No permission has been granted to input stream with "
@@ -348,8 +355,8 @@ void AudioRendererHost::OnCreateStream(
media_observer->OnCreatingAudioStream(render_process_id_, render_view_id);
scoped_ptr<AudioEntry> entry(new AudioEntry(
- this, stream_id, render_view_id, params, input_device_id,
- shared_memory.Pass(),
+ this, stream_id, render_view_id, params, output_device_id,
+ input_device_id, shared_memory.Pass(),
reader.PassAs<media::AudioOutputController::SyncReader>()));
if (mirroring_manager_) {
mirroring_manager_->AddDiverter(
diff --git a/content/browser/renderer_host/media/media_stream_manager.cc b/content/browser/renderer_host/media/media_stream_manager.cc
index 4493311..4c234b6 100644
--- a/content/browser/renderer_host/media/media_stream_manager.cc
+++ b/content/browser/renderer_host/media/media_stream_manager.cc
@@ -740,8 +740,8 @@ void MediaStreamManager::Opened(MediaStreamType stream_type,
audio_input_device_manager_->GetOpenedDeviceInfoById(
device_it->session_id);
DCHECK_EQ(info->device.id, device_it->device.id);
- device_it->device.sample_rate = info->device.sample_rate;
- device_it->device.channel_layout = info->device.channel_layout;
+ device_it->device.input = info->device.input;
+ device_it->device.matched_output = info->device.matched_output;
}
audio_devices.push_back(*device_it);
} else if (IsVideoMediaType(device_it->device.type)) {
@@ -949,8 +949,8 @@ void MediaStreamManager::HandleAccessRequestResponse(
if (sample_rate <= 0 || sample_rate > 96000)
sample_rate = 44100;
- device_info.device.sample_rate = sample_rate;
- device_info.device.channel_layout = media::CHANNEL_LAYOUT_STEREO;
+ device_info.device.input.sample_rate = sample_rate;
+ device_info.device.input.channel_layout = media::CHANNEL_LAYOUT_STEREO;
}
}
diff --git a/content/common/media/media_stream_messages.h b/content/common/media/media_stream_messages.h
index 8edd0e0..6ee9f8f 100644
--- a/content/common/media/media_stream_messages.h
+++ b/content/common/media/media_stream_messages.h
@@ -30,8 +30,13 @@ IPC_STRUCT_TRAITS_BEGIN(content::StreamDeviceInfo)
IPC_STRUCT_TRAITS_MEMBER(device.type)
IPC_STRUCT_TRAITS_MEMBER(device.name)
IPC_STRUCT_TRAITS_MEMBER(device.id)
- IPC_STRUCT_TRAITS_MEMBER(device.sample_rate)
- IPC_STRUCT_TRAITS_MEMBER(device.channel_layout)
+ IPC_STRUCT_TRAITS_MEMBER(device.matched_output_device_id)
+ IPC_STRUCT_TRAITS_MEMBER(device.input.sample_rate)
+ IPC_STRUCT_TRAITS_MEMBER(device.input.channel_layout)
+ IPC_STRUCT_TRAITS_MEMBER(device.input.frames_per_buffer)
+ IPC_STRUCT_TRAITS_MEMBER(device.matched_output.sample_rate)
+ IPC_STRUCT_TRAITS_MEMBER(device.matched_output.channel_layout)
+ IPC_STRUCT_TRAITS_MEMBER(device.matched_output.frames_per_buffer)
IPC_STRUCT_TRAITS_MEMBER(in_use)
IPC_STRUCT_TRAITS_MEMBER(session_id)
IPC_STRUCT_TRAITS_END()
diff --git a/content/common/media/media_stream_options.cc b/content/common/media/media_stream_options.cc
index e042d8c..4b48bdb 100644
--- a/content/common/media/media_stream_options.cc
+++ b/content/common/media/media_stream_options.cc
@@ -48,9 +48,10 @@ StreamDeviceInfo::StreamDeviceInfo(MediaStreamType service_param,
const std::string& device_param,
int sample_rate,
int channel_layout,
+ int frames_per_buffer,
bool opened)
: device(service_param, device_param, name_param, sample_rate,
- channel_layout),
+ channel_layout, frames_per_buffer),
in_use(opened),
session_id(kNoId) {
}
@@ -58,11 +59,15 @@ StreamDeviceInfo::StreamDeviceInfo(MediaStreamType service_param,
// static
bool StreamDeviceInfo::IsEqual(const StreamDeviceInfo& first,
const StreamDeviceInfo& second) {
+ const MediaStreamDevice::AudioDeviceParameters& input_first =
+ first.device.input;
+ const MediaStreamDevice::AudioDeviceParameters& input_second =
+ second.device.input;
return first.device.type == second.device.type &&
first.device.name == second.device.name &&
first.device.id == second.device.id &&
- first.device.sample_rate == second.device.sample_rate &&
- first.device.channel_layout == second.device.channel_layout &&
+ input_first.sample_rate == input_second.sample_rate &&
+ input_first.channel_layout == input_second.channel_layout &&
first.in_use == second.in_use &&
first.session_id == second.session_id;
}
diff --git a/content/common/media/media_stream_options.h b/content/common/media/media_stream_options.h
index bbb1aa4..bd8c775 100644
--- a/content/common/media/media_stream_options.h
+++ b/content/common/media/media_stream_options.h
@@ -52,11 +52,13 @@ struct CONTENT_EXPORT StreamDeviceInfo {
const std::string& device_param,
int sample_rate,
int channel_layout,
+ int frames_per_buffer,
bool opened);
static bool IsEqual(const StreamDeviceInfo& first,
const StreamDeviceInfo& second);
MediaStreamDevice device;
+
// Set to true if the device has been opened, false otherwise.
bool in_use;
// Id for this capture session. Unique for all sessions of the same type.
diff --git a/content/public/common/media_stream_request.cc b/content/public/common/media_stream_request.cc
index e3ad35b..00883541 100644
--- a/content/public/common/media_stream_request.cc
+++ b/content/public/common/media_stream_request.cc
@@ -28,9 +28,7 @@ MediaStreamDevice::MediaStreamDevice(
const std::string& name)
: type(type),
id(id),
- name(name),
- sample_rate(0),
- channel_layout(0) {
+ name(name) {
}
MediaStreamDevice::MediaStreamDevice(
@@ -38,12 +36,12 @@ MediaStreamDevice::MediaStreamDevice(
const std::string& id,
const std::string& name,
int sample_rate,
- int channel_layout)
+ int channel_layout,
+ int frames_per_buffer)
: type(type),
id(id),
name(name),
- sample_rate(sample_rate),
- channel_layout(channel_layout) {
+ input(sample_rate, channel_layout, frames_per_buffer) {
}
MediaStreamDevice::~MediaStreamDevice() {}
diff --git a/content/public/common/media_stream_request.h b/content/public/common/media_stream_request.h
index cbbf232..26fe9c1 100644
--- a/content/public/common/media_stream_request.h
+++ b/content/public/common/media_stream_request.h
@@ -71,7 +71,8 @@ struct CONTENT_EXPORT MediaStreamDevice {
const std::string& id,
const std::string& name,
int sample_rate,
- int channel_layout);
+ int channel_layout,
+ int frames_per_buffer);
~MediaStreamDevice();
@@ -81,20 +82,53 @@ struct CONTENT_EXPORT MediaStreamDevice {
// The device's unique ID.
std::string id;
+ // The device id of a matched output device if any (otherwise empty).
+ // Only applicable to audio devices.
+ std::string matched_output_device_id;
+
// The device's "friendly" name. Not guaranteed to be unique.
std::string name;
- // Preferred sample rate in samples per second for the device.
- // Only utilized for audio devices. Will be set to 0 if the constructor
- // with three parameters (intended for video) is used.
- int sample_rate;
-
- // Preferred channel configuration for the device.
- // Only utilized for audio devices. Will be set to 0 if the constructor
- // with three parameters (intended for video) is used.
- // TODO(henrika): ideally, we would like to use media::ChannelLayout here
- // but including media/base/channel_layout.h violates checkdeps rules.
- int channel_layout;
+ // Contains properties that match directly with those with the same name
+ // in media::AudioParameters.
+ struct AudioDeviceParameters {
+ AudioDeviceParameters()
+ : sample_rate(), channel_layout(), frames_per_buffer() {
+ }
+
+ AudioDeviceParameters(int sample_rate, int channel_layout,
+ int frames_per_buffer)
+ : sample_rate(sample_rate),
+ channel_layout(channel_layout),
+ frames_per_buffer(frames_per_buffer) {
+ }
+
+ // Preferred sample rate in samples per second for the device.
+ int sample_rate;
+
+ // Preferred channel configuration for the device.
+ // TODO(henrika): ideally, we would like to use media::ChannelLayout here
+ // but including media/base/channel_layout.h violates checkdeps rules.
+ int channel_layout;
+
+ // Preferred number of frames per buffer for the device. This is filled
+ // in on the browser side and can be used by the renderer to match the
+ // expected browser side settings and avoid unnecessary buffering.
+ // See media::AudioParameters for more.
+ int frames_per_buffer;
+ };
+
+ // These below two member variables are valid only when the type of device is
+ // audio (i.e. IsAudioMediaType returns true).
+
+ // Contains the device properties of the capture device.
+ AudioDeviceParameters input;
+
+ // If the capture device has an associated output device (e.g. headphones),
+ // this will contain the properties for the output device. If no such device
+ // exists (e.g. webcam w/mic), then the value of this member will be all
+ // zeros.
+ AudioDeviceParameters matched_output;
};
typedef std::vector<MediaStreamDevice> MediaStreamDevices;
diff --git a/content/renderer/media/media_stream_dependency_factory.cc b/content/renderer/media/media_stream_dependency_factory.cc
index c7ed7e9..0034d35 100644
--- a/content/renderer/media/media_stream_dependency_factory.cc
+++ b/content/renderer/media/media_stream_dependency_factory.cc
@@ -815,8 +815,10 @@ MediaStreamDependencyFactory::MaybeCreateAudioCapturer(
if (!capturer->Initialize(
render_view_id,
- static_cast<media::ChannelLayout>(device_info.device.channel_layout),
- device_info.device.sample_rate, device_info.session_id,
+ static_cast<media::ChannelLayout>(
+ device_info.device.input.channel_layout),
+ device_info.device.input.sample_rate,
+ device_info.session_id,
device_info.device.id)) {
return NULL;
}
diff --git a/content/renderer/media/media_stream_dependency_factory_unittest.cc b/content/renderer/media/media_stream_dependency_factory_unittest.cc
index b257f7e..8cc562c 100644
--- a/content/renderer/media/media_stream_dependency_factory_unittest.cc
+++ b/content/renderer/media/media_stream_dependency_factory_unittest.cc
@@ -69,8 +69,6 @@ class MediaStreamDependencyFactoryTest : public ::testing::Test {
StreamDeviceInfo info;
info.device.type = content::MEDIA_DEVICE_AUDIO_CAPTURE;
info.device.name = "audio";
- info.device.sample_rate = 0;
- info.device.channel_layout = 0;
info.session_id = 99;
audio_sources[0].initialize("audio",
WebKit::WebMediaStreamSource::TypeAudio,
diff --git a/content/renderer/media/media_stream_impl.cc b/content/renderer/media/media_stream_impl.cc
index 9eab908..d3b2c742 100644
--- a/content/renderer/media/media_stream_impl.cc
+++ b/content/renderer/media/media_stream_impl.cc
@@ -21,6 +21,8 @@
#include "content/renderer/media/webrtc_audio_renderer.h"
#include "content/renderer/media/webrtc_local_audio_renderer.h"
#include "content/renderer/media/webrtc_uma_histograms.h"
+#include "content/renderer/render_thread_impl.h"
+#include "media/base/audio_hardware_config.h"
#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
@@ -120,6 +122,15 @@ webrtc::MediaStreamInterface* GetNativeMediaStream(
return extra_data->stream().get();
}
+void GetDefaultOutputDeviceParams(
+ int* output_sample_rate, int* output_buffer_size) {
+ // Fetch the default audio output hardware config.
+ media::AudioHardwareConfig* hardware_config =
+ RenderThreadImpl::current()->GetAudioHardwareConfig();
+ *output_sample_rate = hardware_config->GetOutputSampleRate();
+ *output_buffer_size = hardware_config->GetOutputBufferSize();
+}
+
} // namespace
MediaStreamImpl::MediaStreamImpl(
@@ -592,7 +603,15 @@ scoped_refptr<WebRtcAudioRenderer> MediaStreamImpl::CreateRemoteAudioRenderer(
DVLOG(1) << "MediaStreamImpl::CreateRemoteAudioRenderer label:"
<< stream->label();
- return new WebRtcAudioRenderer(RenderViewObserver::routing_id());
+ int session_id = 0, sample_rate = 0, buffer_size = 0;
+ if (!GetAuthorizedDeviceInfoForAudioRenderer(&session_id,
+ &sample_rate,
+ &buffer_size)) {
+ GetDefaultOutputDeviceParams(&sample_rate, &buffer_size);
+ }
+
+ return new WebRtcAudioRenderer(RenderViewObserver::routing_id(),
+ session_id, sample_rate, buffer_size);
}
scoped_refptr<WebRtcLocalAudioRenderer>
@@ -611,11 +630,21 @@ MediaStreamImpl::CreateLocalAudioRenderer(
<< "audio_track.id : " << audio_track->id()
<< "audio_track.enabled: " << audio_track->enabled();
+ int session_id = 0, sample_rate = 0, buffer_size = 0;
+ if (!GetAuthorizedDeviceInfoForAudioRenderer(&session_id,
+ &sample_rate,
+ &buffer_size)) {
+ GetDefaultOutputDeviceParams(&sample_rate, &buffer_size);
+ }
+
// Create a new WebRtcLocalAudioRenderer instance and connect it to the
// existing WebRtcAudioCapturer so that the renderer can use it as source.
return new WebRtcLocalAudioRenderer(
static_cast<WebRtcLocalAudioTrack*>(audio_track),
- RenderViewObserver::routing_id());
+ RenderViewObserver::routing_id(),
+ session_id,
+ sample_rate,
+ buffer_size);
}
void MediaStreamImpl::StopLocalAudioTrack(
@@ -639,6 +668,49 @@ void MediaStreamImpl::StopLocalAudioTrack(
}
}
+bool MediaStreamImpl::GetAuthorizedDeviceInfoForAudioRenderer(
+ int* session_id,
+ int* output_sample_rate,
+ int* output_frames_per_buffer) {
+ DCHECK(CalledOnValidThread());
+
+ const StreamDeviceInfo* device_info = NULL;
+ WebKit::WebString session_id_str;
+ UserMediaRequests::iterator it = user_media_requests_.begin();
+ for (; it != user_media_requests_.end(); ++it) {
+ UserMediaRequestInfo* request = (*it);
+ for (size_t i = 0; i < request->audio_sources.size(); ++i) {
+ const WebKit::WebMediaStreamSource& source = request->audio_sources[i];
+ if (source.readyState() == WebKit::WebMediaStreamSource::ReadyStateEnded)
+ continue;
+
+ if (!session_id_str.isEmpty() &&
+ !session_id_str.equals(source.deviceId())) {
+ DVLOG(1) << "Multiple capture devices are open so we can't pick a "
+ "session for a matching output device.";
+ return false;
+ }
+
+ // TODO(tommi): Storing the session id in the deviceId field doesn't
+ // feel right. Move it over to MediaStreamSourceExtraData?
+ session_id_str = source.deviceId();
+ content::MediaStreamSourceExtraData* extra_data =
+ static_cast<content::MediaStreamSourceExtraData*>(source.extraData());
+ device_info = &extra_data->device_info();
+ }
+ }
+
+ if (session_id_str.isEmpty() || !device_info)
+ return false;
+
+ base::StringToInt(UTF16ToUTF8(session_id_str), session_id);
+ *output_sample_rate = device_info->device.matched_output.sample_rate;
+ *output_frames_per_buffer =
+ device_info->device.matched_output.frames_per_buffer;
+
+ return true;
+}
+
MediaStreamSourceExtraData::MediaStreamSourceExtraData(
const StreamDeviceInfo& device_info,
const WebKit::WebMediaStreamSource& webkit_source)
diff --git a/content/renderer/media/media_stream_impl.h b/content/renderer/media/media_stream_impl.h
index 1cd4479..149ce4f 100644
--- a/content/renderer/media/media_stream_impl.h
+++ b/content/renderer/media/media_stream_impl.h
@@ -155,6 +155,16 @@ class CONTENT_EXPORT MediaStreamImpl
void StopLocalAudioTrack(const WebKit::WebMediaStream& web_stream);
+ // Returns a valid session id if a single capture device is currently open
+ // (and then the matching session_id), otherwise -1.
+ // This is used to pass on a session id to a webrtc audio renderer (either
+ // local or remote), so that audio will be rendered to a matching output
+ // device, should one exist.
+ // Note that if there are more than one open capture devices the function
+ // will not be able to pick an appropriate device and return false.
+ bool GetAuthorizedDeviceInfoForAudioRenderer(
+ int* session_id, int* output_sample_rate, int* output_buffer_size);
+
// Weak ref to a MediaStreamDependencyFactory, owned by the RenderThread.
// It's valid for the lifetime of RenderThread.
MediaStreamDependencyFactory* dependency_factory_;
diff --git a/content/renderer/media/webrtc_audio_device_unittest.cc b/content/renderer/media/webrtc_audio_device_unittest.cc
index ac41e4f..5013103 100644
--- a/content/renderer/media/webrtc_audio_device_unittest.cc
+++ b/content/renderer/media/webrtc_audio_device_unittest.cc
@@ -547,8 +547,9 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartPlayout) {
ch, webrtc::kPlaybackPerChannel, *media_process.get()));
EXPECT_EQ(0, base->StartPlayout(ch));
- scoped_refptr<WebRtcAudioRenderer> renderer =
- new WebRtcAudioRenderer(kRenderViewId);
+ scoped_refptr<WebRtcAudioRenderer> renderer(
+ CreateDefaultWebRtcAudioRenderer(kRenderViewId));
+
EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
renderer->Play();
@@ -722,8 +723,8 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_PlayLocalFile) {
int ch = base->CreateChannel();
EXPECT_NE(-1, ch);
EXPECT_EQ(0, base->StartPlayout(ch));
- scoped_refptr<WebRtcAudioRenderer> renderer =
- new WebRtcAudioRenderer(kRenderViewId);
+ scoped_refptr<WebRtcAudioRenderer> renderer(
+ CreateDefaultWebRtcAudioRenderer(kRenderViewId));
EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
renderer->Play();
@@ -832,8 +833,8 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
EXPECT_EQ(0, base->StartPlayout(ch));
EXPECT_EQ(0, base->StartSend(ch));
- scoped_refptr<WebRtcAudioRenderer> renderer =
- new WebRtcAudioRenderer(kRenderViewId);
+ scoped_refptr<WebRtcAudioRenderer> renderer(
+ CreateDefaultWebRtcAudioRenderer(kRenderViewId));
EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
renderer->Play();
@@ -943,8 +944,9 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_WebRtcPlayoutSetupTime) {
base::WaitableEvent event(false, false);
scoped_ptr<MockWebRtcAudioRendererSource> renderer_source(
new MockWebRtcAudioRendererSource(&event));
- scoped_refptr<WebRtcAudioRenderer> renderer =
- new WebRtcAudioRenderer(kRenderViewId);
+
+ scoped_refptr<WebRtcAudioRenderer> renderer(
+ CreateDefaultWebRtcAudioRenderer(kRenderViewId));
renderer->Initialize(renderer_source.get());
// Start the timer and playout.
diff --git a/content/renderer/media/webrtc_audio_renderer.cc b/content/renderer/media/webrtc_audio_renderer.cc
index caa4ab8..664ae33 100644
--- a/content/renderer/media/webrtc_audio_renderer.cc
+++ b/content/renderer/media/webrtc_audio_renderer.cc
@@ -9,11 +9,9 @@
#include "base/strings/string_util.h"
#include "content/renderer/media/audio_device_factory.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
-#include "content/renderer/render_thread_impl.h"
#include "media/audio/audio_output_device.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/sample_rates.h"
-#include "media/base/audio_hardware_config.h"
#if defined(OS_WIN)
#include "base/win/windows_version.h"
@@ -90,13 +88,19 @@ void AddHistogramFramesPerBuffer(int param) {
} // namespace
-WebRtcAudioRenderer::WebRtcAudioRenderer(int source_render_view_id)
+WebRtcAudioRenderer::WebRtcAudioRenderer(int source_render_view_id,
+ int session_id,
+ int sample_rate,
+ int frames_per_buffer)
: state_(UNINITIALIZED),
source_render_view_id_(source_render_view_id),
+ session_id_(session_id),
source_(NULL),
play_ref_count_(0),
audio_delay_milliseconds_(0),
- fifo_delay_milliseconds_(0) {
+ fifo_delay_milliseconds_(0),
+ sample_rate_(sample_rate),
+ frames_per_buffer_(frames_per_buffer) {
}
WebRtcAudioRenderer::~WebRtcAudioRenderer() {
@@ -120,10 +124,10 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
DVLOG(1) << "Using mono audio output for Android";
channel_layout = media::CHANNEL_LAYOUT_MONO;
#endif
- // Ask the renderer for the default audio output hardware sample-rate.
- media::AudioHardwareConfig* hardware_config =
- RenderThreadImpl::current()->GetAudioHardwareConfig();
- int sample_rate = hardware_config->GetOutputSampleRate();
+
+ // TODO(tommi,henrika): Maybe we should just change |sample_rate_| to be
+ // immutable and change its value instead of using a temporary?
+ int sample_rate = sample_rate_;
DVLOG(1) << "Audio output hardware sample rate: " << sample_rate;
// WebRTC does not yet support higher rates than 96000 on the client side
@@ -178,7 +182,7 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
#if defined(OS_ANDROID)
buffer_size = kDefaultOutputBufferSize;
#else
- buffer_size = hardware_config->GetOutputBufferSize();
+ buffer_size = frames_per_buffer_;
#endif
sink_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
@@ -206,7 +210,6 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
}
}
-
// Allocate local audio buffers based on the parameters above.
// It is assumed that each audio sample contains 16 bits and each
// audio frame contains one or two audio samples depending on the
@@ -219,7 +222,12 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
// Configure the audio rendering client and start rendering.
sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_);
- sink_->Initialize(sink_params, this);
+
+ // TODO(tommi): Rename InitializeUnifiedStream to rather reflect association
+ // with a session.
+ DCHECK_GE(session_id_, 0);
+ sink_->InitializeUnifiedStream(sink_params, this, session_id_);
+
sink_->Start();
// User must call Play() before any audio can be heard.
diff --git a/content/renderer/media/webrtc_audio_renderer.h b/content/renderer/media/webrtc_audio_renderer.h
index 577c993..1112c61 100644
--- a/content/renderer/media/webrtc_audio_renderer.h
+++ b/content/renderer/media/webrtc_audio_renderer.h
@@ -13,6 +13,7 @@
#include "media/base/audio_decoder.h"
#include "media/base/audio_pull_fifo.h"
#include "media/base/audio_renderer_sink.h"
+#include "media/base/channel_layout.h"
namespace media {
class AudioOutputDevice;
@@ -28,7 +29,10 @@ class CONTENT_EXPORT WebRtcAudioRenderer
: NON_EXPORTED_BASE(public media::AudioRendererSink::RenderCallback),
NON_EXPORTED_BASE(public MediaStreamAudioRenderer) {
public:
- explicit WebRtcAudioRenderer(int source_render_view_id);
+ WebRtcAudioRenderer(int source_render_view_id,
+ int session_id,
+ int sample_rate,
+ int frames_per_buffer);
// Initialize function called by clients like WebRtcAudioDeviceImpl.
// Stop() has to be called before |source| is deleted.
@@ -72,6 +76,7 @@ class CONTENT_EXPORT WebRtcAudioRenderer
// The render view in which the audio is rendered into |sink_|.
const int source_render_view_id_;
+ const int session_id_;
// The sink (destination) for rendered audio.
scoped_refptr<media::AudioOutputDevice> sink_;
@@ -100,6 +105,10 @@ class CONTENT_EXPORT WebRtcAudioRenderer
// Delay due to the FIFO in milliseconds.
int fifo_delay_milliseconds_;
+ // The preferred sample rate and buffer sizes provided via the ctor.
+ const int sample_rate_;
+ const int frames_per_buffer_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(WebRtcAudioRenderer);
};
diff --git a/content/renderer/media/webrtc_local_audio_renderer.cc b/content/renderer/media/webrtc_local_audio_renderer.cc
index af65d8d..3b94e24 100644
--- a/content/renderer/media/webrtc_local_audio_renderer.cc
+++ b/content/renderer/media/webrtc_local_audio_renderer.cc
@@ -10,11 +10,9 @@
#include "base/synchronization/lock.h"
#include "content/renderer/media/audio_device_factory.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
-#include "content/renderer/render_thread_impl.h"
#include "media/audio/audio_output_device.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_fifo.h"
-#include "media/base/audio_hardware_config.h"
namespace content {
@@ -96,10 +94,16 @@ void WebRtcLocalAudioRenderer::SetCaptureFormat(
// WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer implementation.
WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer(
WebRtcLocalAudioTrack* audio_track,
- int source_render_view_id)
+ int source_render_view_id,
+ int session_id,
+ int sample_rate,
+ int frames_per_buffer)
: audio_track_(audio_track),
source_render_view_id_(source_render_view_id),
- playing_(false) {
+ session_id_(session_id),
+ playing_(false),
+ sample_rate_(sample_rate),
+ frames_per_buffer_(frames_per_buffer) {
DCHECK(audio_track);
DVLOG(1) << "WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer()";
}
@@ -133,11 +137,6 @@ void WebRtcLocalAudioRenderer::Start() {
loopback_fifo_.reset(new media::AudioFifo(
audio_params_.channels(), 10 * audio_params_.frames_per_buffer()));
-#if defined(OS_ANDROID)
- media::AudioHardwareConfig* hardware_config =
- RenderThreadImpl::current()->GetAudioHardwareConfig();
-#endif
-
media::AudioParameters sink_params(audio_params_.format(),
audio_params_.channel_layout(),
audio_params_.sample_rate(),
@@ -147,17 +146,18 @@ void WebRtcLocalAudioRenderer::Start() {
// achieve low latency mode, we need use buffer size suggested by
// AudioManager for the sink paramters which will be used to decide
// buffer size for shared memory buffer.
- hardware_config->GetOutputBufferSize()
+ frames_per_buffer_
#else
2 * audio_params_.frames_per_buffer()
#endif
);
+
sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_);
// TODO(henrika): we could utilize the unified audio here instead and do
// sink_->InitializeIO(sink_params, 2, callback_.get());
// It would then be possible to avoid using the WebRtcAudioCapturer.
- sink_->Initialize(sink_params, this);
+ sink_->InitializeUnifiedStream(sink_params, this, session_id_);
// Start the capturer and local rendering. Note that, the capturer is owned
// by the WebRTC ADM and might already bee running.
diff --git a/content/renderer/media/webrtc_local_audio_renderer.h b/content/renderer/media/webrtc_local_audio_renderer.h
index f58aee3..f77d523 100644
--- a/content/renderer/media/webrtc_local_audio_renderer.h
+++ b/content/renderer/media/webrtc_local_audio_renderer.h
@@ -48,7 +48,10 @@ class CONTENT_EXPORT WebRtcLocalAudioRenderer
// The |source| is owned by the WebRtcAudioDeviceImpl.
// Called on the main thread.
WebRtcLocalAudioRenderer(WebRtcLocalAudioTrack* audio_track,
- int source_render_view_id);
+ int source_render_view_id,
+ int session_id,
+ int sample_rate,
+ int frames_per_buffer);
// MediaStreamAudioRenderer implementation.
// Called on the main thread.
@@ -101,6 +104,7 @@ class CONTENT_EXPORT WebRtcLocalAudioRenderer
// The render view in which the audio is rendered into |sink_|.
const int source_render_view_id_;
+ const int session_id_;
// The sink (destination) for rendered audio.
scoped_refptr<media::AudioOutputDevice> sink_;
@@ -128,6 +132,14 @@ class CONTENT_EXPORT WebRtcLocalAudioRenderer
// Protects |loopback_fifo_|, |playing_| and |sink_|.
mutable base::Lock thread_lock_;
+ // The preferred sample rate and buffer sizes provided via the ctor.
+ const int sample_rate_;
+ const int frames_per_buffer_;
+
+ // The preferred device id of the output device or empty for the default
+ // output device.
+ const std::string output_device_id_;
+
DISALLOW_COPY_AND_ASSIGN(WebRtcLocalAudioRenderer);
};
diff --git a/content/test/webrtc_audio_device_test.cc b/content/test/webrtc_audio_device_test.cc
index eb2f1c7..9b0a819 100644
--- a/content/test/webrtc_audio_device_test.cc
+++ b/content/test/webrtc_audio_device_test.cc
@@ -201,6 +201,18 @@ void MAYBE_WebRTCAudioDeviceTest::SetAudioHardwareConfig(
audio_hardware_config_ = hardware_config;
}
+scoped_refptr<WebRtcAudioRenderer>
+MAYBE_WebRTCAudioDeviceTest::CreateDefaultWebRtcAudioRenderer(
+ int render_view_id) {
+ media::AudioHardwareConfig* hardware_config =
+ RenderThreadImpl::current()->GetAudioHardwareConfig();
+ int sample_rate = hardware_config->GetOutputSampleRate();
+ int frames_per_buffer = hardware_config->GetOutputBufferSize();
+
+ return new WebRtcAudioRenderer(render_view_id, 0, sample_rate,
+ frames_per_buffer);
+}
+
void MAYBE_WebRTCAudioDeviceTest::InitializeIOThread(const char* thread_name) {
#if defined(OS_WIN)
// We initialize COM (STA) on our IO thread as is done in Chrome.
diff --git a/content/test/webrtc_audio_device_test.h b/content/test/webrtc_audio_device_test.h
index b2fefaf..927e1dc 100644
--- a/content/test/webrtc_audio_device_test.h
+++ b/content/test/webrtc_audio_device_test.h
@@ -53,6 +53,7 @@ class MediaStreamManager;
class RenderThreadImpl;
class ResourceContext;
class TestBrowserThread;
+class WebRtcAudioRenderer;
class WebRTCMockRenderProcess;
// Scoped class for WebRTC interfaces. Fetches the wrapped interface
@@ -134,6 +135,9 @@ class MAYBE_WebRTCAudioDeviceTest : public ::testing::Test,
void SetAudioHardwareConfig(media::AudioHardwareConfig* hardware_config);
+ scoped_refptr<WebRtcAudioRenderer> CreateDefaultWebRtcAudioRenderer(
+ int render_view_id);
+
protected:
void InitializeIOThread(const char* thread_name);
void UninitializeIOThread();