summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authortommi <tommi@chromium.org>2015-08-28 03:02:08 -0700
committerCommit bot <commit-bot@chromium.org>2015-08-28 10:02:43 +0000
commit13c341a15d70751583937a2608ae82525010f7b3 (patch)
tree5b71bf0cb63bc2aac14b5423f8d7dcdea42c9db0
parentaf0753a8ec04bd54f3234c9e5a899014a7e010ea (diff)
downloadchromium_src-13c341a15d70751583937a2608ae82525010f7b3.zip
chromium_src-13c341a15d70751583937a2608ae82525010f7b3.tar.gz
chromium_src-13c341a15d70751583937a2608ae82525010f7b3.tar.bz2
Include default communication devices in audio device enumerations. This removes the heuristic that picks the communications output device for rendering when associated with an active communications capture device. It also removes the goog-specific constraint we offered to turn off ducking, since ducking will now always be turned off for the default device.
BUG=525443 Review URL: https://codereview.chromium.org/1314803003 Cr-Commit-Position: refs/heads/master@{#346113}
-rw-r--r--content/browser/renderer_host/media/media_stream_manager.cc7
-rw-r--r--content/common/media/media_stream_options.cc1
-rw-r--r--content/common/media/media_stream_options.h3
-rw-r--r--content/renderer/media/media_stream_audio_processor_options.cc16
-rw-r--r--content/renderer/media/media_stream_audio_processor_unittest.cc7
-rw-r--r--content/renderer/media/media_stream_dispatcher.cc15
-rw-r--r--content/renderer/media/media_stream_dispatcher.h6
-rw-r--r--content/renderer/media/media_stream_dispatcher_unittest.cc62
-rw-r--r--content/renderer/media/webrtc/peer_connection_dependency_factory.cc10
-rw-r--r--content/renderer/media/webrtc_audio_renderer.cc18
-rw-r--r--content/renderer/media/webrtc_local_audio_renderer.cc16
-rw-r--r--media/audio/audio_manager_base.cc2
-rw-r--r--media/audio/audio_manager_base.h4
-rw-r--r--media/audio/win/audio_low_latency_input_win.cc60
-rw-r--r--media/audio/win/audio_low_latency_input_win.h5
-rw-r--r--media/audio/win/audio_low_latency_output_win.cc9
-rw-r--r--media/audio/win/audio_manager_win.cc12
-rw-r--r--media/audio/win/core_audio_util_win.cc36
18 files changed, 67 insertions, 222 deletions
diff --git a/content/browser/renderer_host/media/media_stream_manager.cc b/content/browser/renderer_host/media/media_stream_manager.cc
index a4a95e0..5071bff1 100644
--- a/content/browser/renderer_host/media/media_stream_manager.cc
+++ b/content/browser/renderer_host/media/media_stream_manager.cc
@@ -127,12 +127,7 @@ void ParseStreamType(const StreamOptions& options,
// explicitly turn them off.
void FilterAudioEffects(const StreamOptions& options, int* effects) {
DCHECK(effects);
- // TODO(ajm): Should we also handle ECHO_CANCELLER here?
- std::string value;
- if (options.GetFirstAudioConstraintByName(
- kMediaStreamAudioDucking, &value, NULL) && value == "false") {
- *effects &= ~media::AudioParameters::DUCKING;
- }
+ // TODO(ajm): Should we handle ECHO_CANCELLER here?
}
// Unlike other effects, hotword is off by default, so turn it on if it's
diff --git a/content/common/media/media_stream_options.cc b/content/common/media/media_stream_options.cc
index dfba2e8..795e36c 100644
--- a/content/common/media/media_stream_options.cc
+++ b/content/common/media/media_stream_options.cc
@@ -19,7 +19,6 @@ const char kMediaStreamRenderToAssociatedSink[] =
"chromeRenderToAssociatedSink";
// The prefix of this constant is 'goog' to match with other getUserMedia
// constraints for audio.
-const char kMediaStreamAudioDucking[] = "googDucking";
const char kMediaStreamAudioHotword[] = "googHotword";
namespace {
diff --git a/content/common/media/media_stream_options.h b/content/common/media/media_stream_options.h
index 9f208da..e795fdb 100644
--- a/content/common/media/media_stream_options.h
+++ b/content/common/media/media_stream_options.h
@@ -28,9 +28,6 @@ CONTENT_EXPORT extern const char kMediaStreamSourceSystem[];
// device belongs to.
CONTENT_EXPORT extern const char kMediaStreamRenderToAssociatedSink[];
-// Controls whether ducking of audio is enabled on platforms that support it.
-CONTENT_EXPORT extern const char kMediaStreamAudioDucking[];
-
// Controls whether the hotword audio stream is used on platforms that support
// it.
CONTENT_EXPORT extern const char kMediaStreamAudioHotword[];
diff --git a/content/renderer/media/media_stream_audio_processor_options.cc b/content/renderer/media/media_stream_audio_processor_options.cc
index 1f875f9..5c61a05 100644
--- a/content/renderer/media/media_stream_audio_processor_options.cc
+++ b/content/renderer/media/media_stream_audio_processor_options.cc
@@ -66,19 +66,9 @@ struct {
{ MediaAudioConstraints::kGoogTypingNoiseDetection, true },
{ MediaAudioConstraints::kGoogExperimentalNoiseSuppression, false },
{ MediaAudioConstraints::kGoogBeamforming, false },
-#if defined(OS_WIN)
- { kMediaStreamAudioDucking, true },
-#else
- { kMediaStreamAudioDucking, false },
-#endif
{ kMediaStreamAudioHotword, false },
};
-bool IsAudioProcessingConstraint(const std::string& key) {
- // |kMediaStreamAudioDucking| does not require audio processing.
- return key != kMediaStreamAudioDucking;
-}
-
// Used to log echo quality based on delay estimates.
enum DelayBasedEchoQuality {
DELAY_BASED_ECHO_QUALITY_GOOD = 0,
@@ -213,11 +203,7 @@ bool MediaAudioConstraints::IsValid() const {
bool MediaAudioConstraints::GetDefaultValueForConstraint(
const blink::WebMediaConstraints& constraints,
const std::string& key) const {
- // |kMediaStreamAudioDucking| is not restricted by
- // |default_audio_processing_constraint_value_| since it does not require
- // audio processing.
- if (!default_audio_processing_constraint_value_ &&
- IsAudioProcessingConstraint(key))
+ if (!default_audio_processing_constraint_value_)
return false;
for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
diff --git a/content/renderer/media/media_stream_audio_processor_unittest.cc b/content/renderer/media/media_stream_audio_processor_unittest.cc
index 5a1d7a8..0e40ece 100644
--- a/content/renderer/media/media_stream_audio_processor_unittest.cc
+++ b/content/renderer/media/media_stream_audio_processor_unittest.cc
@@ -337,7 +337,7 @@ TEST_F(MediaStreamAudioProcessorTest, VerifyConstraints) {
{
// When |kEchoCancellation| is explicitly set to false, the default values
- // for all the constraints except |kMediaStreamAudioDucking| are false.
+ // for all the constraints are false.
MockMediaConstraintFactory constraint_factory;
constraint_factory.AddOptional(MediaAudioConstraints::kEchoCancellation,
false);
@@ -347,11 +347,6 @@ TEST_F(MediaStreamAudioProcessorTest, VerifyConstraints) {
for (size_t i = 0; i < arraysize(kDefaultAudioConstraints); ++i) {
EXPECT_FALSE(audio_constraints.GetProperty(kDefaultAudioConstraints[i]));
}
-#if defined(OS_WIN)
- EXPECT_TRUE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
-#else
- EXPECT_FALSE(audio_constraints.GetProperty(kMediaStreamAudioDucking));
-#endif
}
{
diff --git a/content/renderer/media/media_stream_dispatcher.cc b/content/renderer/media/media_stream_dispatcher.cc
index 361a4ce..c5123f1 100644
--- a/content/renderer/media/media_stream_dispatcher.cc
+++ b/content/renderer/media/media_stream_dispatcher.cc
@@ -403,19 +403,4 @@ int MediaStreamDispatcher::video_session_id(const std::string& label,
return it->second.video_array[index].session_id;
}
-bool MediaStreamDispatcher::IsAudioDuckingActive() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- LabelStreamMap::const_iterator stream_it = label_stream_map_.begin();
- while (stream_it != label_stream_map_.end()) {
- const StreamDeviceInfoArray& audio_array = stream_it->second.audio_array;
- for (StreamDeviceInfoArray::const_iterator device_it = audio_array.begin();
- device_it != audio_array.end(); ++device_it) {
- if (device_it->device.input.effects & media::AudioParameters::DUCKING)
- return true;
- }
- ++stream_it;
- }
- return false;
-}
-
} // namespace content
diff --git a/content/renderer/media/media_stream_dispatcher.h b/content/renderer/media/media_stream_dispatcher.h
index 7784b44..b136a0e 100644
--- a/content/renderer/media/media_stream_dispatcher.h
+++ b/content/renderer/media/media_stream_dispatcher.h
@@ -86,11 +86,6 @@ class CONTENT_EXPORT MediaStreamDispatcher
// Returns an audio session_id given a label and an index.
virtual int audio_session_id(const std::string& label, int index);
- // Returns true if an audio input stream is currently active that was opened
- // with audio ducking enabled. This is information is used when playing out
- // audio so that rendered audio can be excluded from the ducking operation.
- bool IsAudioDuckingActive() const;
-
protected:
int GetNextIpcIdForTest() { return next_ipc_id_; }
@@ -98,7 +93,6 @@ class CONTENT_EXPORT MediaStreamDispatcher
FRIEND_TEST_ALL_PREFIXES(MediaStreamDispatcherTest, BasicVideoDevice);
FRIEND_TEST_ALL_PREFIXES(MediaStreamDispatcherTest, TestFailure);
FRIEND_TEST_ALL_PREFIXES(MediaStreamDispatcherTest, CancelGenerateStream);
- FRIEND_TEST_ALL_PREFIXES(MediaStreamDispatcherTest, CheckDuckingState);
struct Request;
diff --git a/content/renderer/media/media_stream_dispatcher_unittest.cc b/content/renderer/media/media_stream_dispatcher_unittest.cc
index 32acc88..eb68780 100644
--- a/content/renderer/media/media_stream_dispatcher_unittest.cc
+++ b/content/renderer/media/media_stream_dispatcher_unittest.cc
@@ -405,66 +405,4 @@ TEST_F(MediaStreamDispatcherTest, DeviceClosed) {
StreamDeviceInfo::kNoId);
}
-TEST_F(MediaStreamDispatcherTest, CheckDuckingState) {
- scoped_ptr<MediaStreamDispatcher> dispatcher(new MediaStreamDispatcher(NULL));
- scoped_ptr<MockMediaStreamDispatcherEventHandler>
- handler(new MockMediaStreamDispatcherEventHandler);
- StreamOptions components(true, false); // audio only.
- int ipc_request_id1 = dispatcher->next_ipc_id_;
-
- dispatcher->GenerateStream(kRequestId1, handler.get()->AsWeakPtr(),
- components, GURL());
- EXPECT_EQ(1u, dispatcher->requests_.size());
-
- // Ducking isn't active at this point.
- EXPECT_FALSE(dispatcher->IsAudioDuckingActive());
-
- // Complete the creation of stream1 with a single audio track that has
- // ducking enabled.
- StreamDeviceInfoArray audio_device_array(1);
- StreamDeviceInfo& audio_device_info = audio_device_array[0];
- audio_device_info.device.name = "Microphone";
- audio_device_info.device.type = kAudioType;
- audio_device_info.session_id = kAudioSessionId;
- audio_device_info.device.input.effects = media::AudioParameters::DUCKING;
-
- StreamDeviceInfoArray video_device_array; // Empty for this test.
-
- const char kStreamLabel[] = "stream1";
- dispatcher->OnMessageReceived(MediaStreamMsg_StreamGenerated(
- kRouteId, ipc_request_id1, kStreamLabel,
- audio_device_array, video_device_array));
- EXPECT_EQ(handler->request_id_, kRequestId1);
- EXPECT_EQ(0u, dispatcher->requests_.size());
-
- // Ducking should now be reported as active.
- EXPECT_TRUE(dispatcher->IsAudioDuckingActive());
-
- // Stop the device (removes the stream).
- dispatcher->OnMessageReceived(
- MediaStreamMsg_DeviceStopped(kRouteId, kStreamLabel,
- handler->audio_device_));
-
- // Ducking should now be reported as inactive again.
- EXPECT_FALSE(dispatcher->IsAudioDuckingActive());
-
- // Now do the same sort of test with the DUCKING flag off.
- audio_device_info.device.input.effects =
- media::AudioParameters::ECHO_CANCELLER;
-
- dispatcher->OnMessageReceived(MediaStreamMsg_StreamGenerated(
- kRouteId, ipc_request_id1, kStreamLabel,
- audio_device_array, video_device_array));
- EXPECT_EQ(handler->request_id_, kRequestId1);
- EXPECT_EQ(0u, dispatcher->requests_.size());
-
- // Ducking should still be reported as not active.
- EXPECT_FALSE(dispatcher->IsAudioDuckingActive());
-
- // Stop the device (removes the stream).
- dispatcher->OnMessageReceived(
- MediaStreamMsg_DeviceStopped(kRouteId, kStreamLabel,
- handler->audio_device_));
-}
-
} // namespace content
diff --git a/content/renderer/media/webrtc/peer_connection_dependency_factory.cc b/content/renderer/media/webrtc/peer_connection_dependency_factory.cc
index 0fecca7..81d34b9 100644
--- a/content/renderer/media/webrtc/peer_connection_dependency_factory.cc
+++ b/content/renderer/media/webrtc/peer_connection_dependency_factory.cc
@@ -65,8 +65,6 @@ struct {
const char* constraint;
const media::AudioParameters::PlatformEffectsMask effect;
} const kConstraintEffectMap[] = {
- { content::kMediaStreamAudioDucking,
- media::AudioParameters::DUCKING },
{ webrtc::MediaConstraintsInterface::kGoogEchoCancellation,
media::AudioParameters::ECHO_CANCELLER },
};
@@ -103,14 +101,6 @@ void HarmonizeConstraintsAndEffects(RTCMediaConstraints* constraints,
}
DVLOG(1) << "Disabling constraint: "
<< kConstraintEffectMap[i].constraint;
- } else if (kConstraintEffectMap[i].effect ==
- media::AudioParameters::DUCKING && value && !is_mandatory) {
- // Special handling of the DUCKING flag that sets the optional
- // constraint to |false| to match what the device will support.
- constraints->AddOptional(kConstraintEffectMap[i].constraint,
- webrtc::MediaConstraintsInterface::kValueFalse, true);
- // No need to modify |effects| since the ducking flag is already off.
- DCHECK((*effects & media::AudioParameters::DUCKING) == 0);
}
}
}
diff --git a/content/renderer/media/webrtc_audio_renderer.cc b/content/renderer/media/webrtc_audio_renderer.cc
index 9d2b201..0cd3bf2 100644
--- a/content/renderer/media/webrtc_audio_renderer.cc
+++ b/content/renderer/media/webrtc_audio_renderer.cc
@@ -131,21 +131,6 @@ class SharedAudioRenderer : public MediaStreamAudioRenderer {
OnPlayStateChanged on_play_state_changed_;
};
-// Returns either AudioParameters::NO_EFFECTS or AudioParameters::DUCKING
-// depending on whether or not an input element is currently open with
-// ducking enabled.
-int GetCurrentDuckingFlag(int render_frame_id) {
- RenderFrameImpl* const frame =
- RenderFrameImpl::FromRoutingID(render_frame_id);
- MediaStreamDispatcher* const dispatcher = frame ?
- frame->GetMediaStreamDispatcher() : NULL;
- if (dispatcher && dispatcher->IsAudioDuckingActive()) {
- return media::AudioParameters::DUCKING;
- }
-
- return media::AudioParameters::NO_EFFECTS;
-}
-
} // namespace
int WebRtcAudioRenderer::GetOptimalBufferSize(int sample_rate,
@@ -200,8 +185,7 @@ WebRtcAudioRenderer::WebRtcAudioRenderer(
fifo_delay_milliseconds_(0),
sink_params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::CHANNEL_LAYOUT_STEREO, sample_rate, 16,
- frames_per_buffer,
- GetCurrentDuckingFlag(source_render_frame_id)),
+ frames_per_buffer),
render_callback_count_(0) {
WebRtcLogMessage(base::StringPrintf(
"WAR::WAR. source_render_frame_id=%d"
diff --git a/content/renderer/media/webrtc_local_audio_renderer.cc b/content/renderer/media/webrtc_local_audio_renderer.cc
index 6bbdd08..55282c8 100644
--- a/content/renderer/media/webrtc_local_audio_renderer.cc
+++ b/content/renderer/media/webrtc_local_audio_renderer.cc
@@ -255,18 +255,6 @@ void WebRtcLocalAudioRenderer::ReconfigureSink(
DVLOG(1) << "WebRtcLocalAudioRenderer::ReconfigureSink()";
- int implicit_ducking_effect = 0;
- RenderFrameImpl* const frame =
- RenderFrameImpl::FromRoutingID(source_render_frame_id_);
- MediaStreamDispatcher* const dispatcher = frame ?
- frame->GetMediaStreamDispatcher() : NULL;
- if (dispatcher && dispatcher->IsAudioDuckingActive()) {
- DVLOG(1) << "Forcing DUCKING to be ON for output";
- implicit_ducking_effect = media::AudioParameters::DUCKING;
- } else {
- DVLOG(1) << "DUCKING not forced ON for output";
- }
-
if (source_params_.Equals(params))
return;
@@ -280,9 +268,7 @@ void WebRtcLocalAudioRenderer::ReconfigureSink(
source_params_.bits_per_sample(),
WebRtcAudioRenderer::GetOptimalBufferSize(source_params_.sample_rate(),
frames_per_buffer_),
- // If DUCKING is enabled on the source, it needs to be enabled on the
- // sink as well.
- source_params_.effects() | implicit_ducking_effect);
+ source_params_.effects());
{
// Note: The max buffer is fairly large, but will rarely be used.
diff --git a/media/audio/audio_manager_base.cc b/media/audio/audio_manager_base.cc
index ea97f15..ebf1d36 100644
--- a/media/audio/audio_manager_base.cc
+++ b/media/audio/audio_manager_base.cc
@@ -34,6 +34,8 @@ static const int kMaxInputChannels = 3;
const char AudioManagerBase::kDefaultDeviceName[] = "Default";
const char AudioManagerBase::kDefaultDeviceId[] = "default";
+const char AudioManagerBase::kCommunicationsDeviceId[] = "communications";
+const char AudioManagerBase::kCommunicationsDeviceName[] = "Communications";
const char AudioManagerBase::kLoopbackInputDeviceId[] = "loopback";
struct AudioManagerBase::DispatcherParams {
diff --git a/media/audio/audio_manager_base.h b/media/audio/audio_manager_base.h
index 866ecef..346fe47 100644
--- a/media/audio/audio_manager_base.h
+++ b/media/audio/audio_manager_base.h
@@ -35,6 +35,10 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
static const char kDefaultDeviceName[];
// Unique Id of the generic "default" device.
static const char kDefaultDeviceId[];
+ // Unique Id of the generic default communications device (where supported).
+ static const char kCommunicationsDeviceId[];
+ // Name of the generic default communications device (where supported).
+ static const char kCommunicationsDeviceName[];
// Input device ID used to capture the default system playback stream. When
// this device ID is passed to MakeAudioInputStream() the returned
diff --git a/media/audio/win/audio_low_latency_input_win.cc b/media/audio/win/audio_low_latency_input_win.cc
index 66792fc..53f1462 100644
--- a/media/audio/win/audio_low_latency_input_win.cc
+++ b/media/audio/win/audio_low_latency_input_win.cc
@@ -28,7 +28,6 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager,
packet_size_frames_(0),
packet_size_bytes_(0),
endpoint_buffer_size_frames_(0),
- effects_(params.effects()),
device_id_(device_id),
perf_count_to_100ns_units_(0.0),
ms_to_frame_count_(0.0),
@@ -433,47 +432,22 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
// Retrieve the IMMDevice by using the specified role or the specified
// unique endpoint device-identification string.
- if (effects_ & AudioParameters::DUCKING) {
- // Ducking has been requested and it is only supported for the default
- // communication device. So, let's open up the communication device and
- // see if the ID of that device matches the requested ID.
- // We consider a kDefaultDeviceId as well as an explicit device id match,
- // to be valid matches.
+ if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
+ // Retrieve the default capture audio endpoint for the specified role.
+ // Note that, in Windows Vista, the MMDevice API supports device roles
+ // but the system-supplied user interface programs do not.
+ hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
+ endpoint_device_.Receive());
+ } else if (device_id_ == AudioManagerBase::kCommunicationsDeviceId) {
hr = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications,
endpoint_device_.Receive());
- if (endpoint_device_.get() &&
- device_id_ != AudioManagerBase::kDefaultDeviceId) {
- base::win::ScopedCoMem<WCHAR> communications_id;
- endpoint_device_->GetId(&communications_id);
- if (device_id_ !=
- base::WideToUTF8(static_cast<WCHAR*>(communications_id))) {
- DLOG(WARNING) << "Ducking has been requested for a non-default device."
- "Not supported.";
- // We can't honor the requested effect flag, so turn it off and
- // continue. We'll check this flag later to see if we've actually
- // opened up the communications device, so it's important that it
- // reflects the active state.
- effects_ &= ~AudioParameters::DUCKING;
- endpoint_device_.Release(); // Fall back on code below.
- }
- }
- }
-
- if (!endpoint_device_.get()) {
- if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
- // Retrieve the default capture audio endpoint for the specified role.
- // Note that, in Windows Vista, the MMDevice API supports device roles
- // but the system-supplied user interface programs do not.
- hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
- endpoint_device_.Receive());
- } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
- // Capture the default playback stream.
- hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
- endpoint_device_.Receive());
- } else {
- hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(),
- endpoint_device_.Receive());
- }
+ } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
+ // Capture the default playback stream.
+ hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
+ endpoint_device_.Receive());
+ } else {
+ hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(),
+ endpoint_device_.Receive());
}
if (FAILED(hr))
@@ -571,8 +545,7 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
flags = AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_NOPERSIST;
} else {
- flags =
- AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST;
+ flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST;
}
// Initialize the audio stream between the client and the device.
@@ -587,7 +560,8 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
0, // hnsBufferDuration
0,
&format_,
- (effects_ & AudioParameters::DUCKING) ? &kCommunicationsSessionId : NULL);
+ device_id_ == AudioManagerBase::kCommunicationsDeviceId ?
+ &kCommunicationsSessionId : nullptr);
if (FAILED(hr))
return hr;
diff --git a/media/audio/win/audio_low_latency_input_win.h b/media/audio/win/audio_low_latency_input_win.h
index f88b8dd..f88c614 100644
--- a/media/audio/win/audio_low_latency_input_win.h
+++ b/media/audio/win/audio_low_latency_input_win.h
@@ -147,11 +147,6 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// Length of the audio endpoint buffer.
uint32 endpoint_buffer_size_frames_;
- // A copy of the supplied AudioParameter's |effects|. If ducking was
- // specified (desired device=communications) but we ended up not being
- // able to open the communications device, this flag will be cleared.
- int effects_;
-
// Contains the unique name of the selected endpoint device.
// Note that AudioManagerBase::kDefaultDeviceId represents the default
// device role and is not a valid ID as such.
diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc
index 494d1b1..c297acc 100644
--- a/media/audio/win/audio_low_latency_output_win.cc
+++ b/media/audio/win/audio_low_latency_output_win.cc
@@ -72,6 +72,12 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
audio_bus_(AudioBus::Create(params)) {
DCHECK(manager_);
+ // The empty string is used to indicate a default device and the
+ // |device_role_| member controls whether that's the default or default
+ // communications device.
+ DCHECK_NE(device_id_, AudioManagerBase::kDefaultDeviceId);
+ DCHECK_NE(device_id_, AudioManagerBase::kCommunicationsDeviceId);
+
DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
<< "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
@@ -140,8 +146,7 @@ bool WASAPIAudioOutputStream::Open() {
// Create an IAudioClient interface for the default rendering IMMDevice.
ScopedComPtr<IAudioClient> audio_client;
- if (device_id_.empty() ||
- CoreAudioUtil::DeviceIsDefault(eRender, device_role_, device_id_)) {
+ if (device_id_.empty()) {
audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
communications_device = (device_role_ == eCommunications);
} else {
diff --git a/media/audio/win/audio_manager_win.cc b/media/audio/win/audio_manager_win.cc
index 7da916d..4a3b008 100644
--- a/media/audio/win/audio_manager_win.cc
+++ b/media/audio/win/audio_manager_win.cc
@@ -283,9 +283,14 @@ void AudioManagerWin::GetAudioDeviceNamesImpl(
GetOutputDeviceNamesWin(device_names);
}
- // Always add default device parameters as first element.
if (!device_names->empty()) {
AudioDeviceName name;
+ if (enumeration_type() == kMMDeviceEnumeration) {
+ name.device_name = AudioManagerBase::kCommunicationsDeviceName;
+ name.unique_id = AudioManagerBase::kCommunicationsDeviceId;
+ device_names->push_front(name);
+ }
+ // Always add default device parameters as first element.
name.device_name = AudioManagerBase::kDefaultDeviceName;
name.unique_id = AudioManagerBase::kDefaultDeviceId;
device_names->push_front(name);
@@ -377,11 +382,12 @@ AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
// Pass an empty string to indicate that we want the default device
// since we consistently only check for an empty string in
// WASAPIAudioOutputStream.
+ bool communications = device_id == AudioManagerBase::kCommunicationsDeviceId;
return new WASAPIAudioOutputStream(this,
- device_id == AudioManagerBase::kDefaultDeviceId ?
+ communications || device_id == AudioManagerBase::kDefaultDeviceId ?
std::string() : device_id,
params,
- params.effects() & AudioParameters::DUCKING ? eCommunications : eConsole);
+ communications ? eCommunications : eConsole);
}
// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR
diff --git a/media/audio/win/core_audio_util_win.cc b/media/audio/win/core_audio_util_win.cc
index ec1e5bc..8442fc1 100644
--- a/media/audio/win/core_audio_util_win.cc
+++ b/media/audio/win/core_audio_util_win.cc
@@ -423,7 +423,26 @@ std::string CoreAudioUtil::GetAudioControllerID(IMMDevice* device,
std::string CoreAudioUtil::GetMatchingOutputDeviceID(
const std::string& input_device_id) {
- ScopedComPtr<IMMDevice> input_device(CreateDevice(input_device_id));
+ // Special handling for the default communications device.
+ // We always treat the configured communications devices, as a pair.
+ // If we didn't do that and the user has e.g. configured a mic of a headset
+ // as the default comms input device and a different device (not the speakers
+ // of the headset) as the default comms output device, then we would otherwise
+ // here pick the headset as the matched output device. That's technically
+ // correct, but the user experience would be that any audio played out to
+ // the matched device, would get ducked since it's not the default comms
+ // device. So here, we go with the user's configuration.
+ if (input_device_id == AudioManagerBase::kCommunicationsDeviceId)
+ return AudioManagerBase::kCommunicationsDeviceId;
+
+ ScopedComPtr<IMMDevice> input_device;
+ if (input_device_id.empty() ||
+ input_device_id == AudioManagerBase::kDefaultDeviceId) {
+ input_device = CreateDefaultDevice(eCapture, eConsole);
+ } else {
+ input_device = CreateDevice(input_device_id);
+ }
+
if (!input_device.get())
return std::string();
@@ -723,6 +742,9 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(const std::string& device_id,
} else if (device_id == AudioManagerBase::kLoopbackInputDeviceId) {
DCHECK(!is_output_device);
device = CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
+ } else if (device_id == AudioManagerBase::kCommunicationsDeviceId) {
+ device = CoreAudioUtil::CreateDefaultDevice(
+ is_output_device ? eRender : eCapture, eCommunications);
} else {
device = CreateDevice(device_id);
}
@@ -755,18 +777,6 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(const std::string& device_id,
params->frames_per_buffer());
}
- ScopedComPtr<IMMDevice> communications_device(
- CreateDefaultDevice(eCapture, eCommunications));
- if (communications_device &&
- GetDeviceID(communications_device.get()) == GetDeviceID(device.get())) {
- // Raise the 'DUCKING' flag for default communication devices.
- *params =
- AudioParameters(params->format(), params->channel_layout(),
- params->channels(), params->sample_rate(),
- params->bits_per_sample(), params->frames_per_buffer(),
- params->effects() | AudioParameters::DUCKING);
- }
-
return hr;
}