author    henrika@chromium.org <henrika@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2012-11-14 10:09:33 +0000
committer henrika@chromium.org <henrika@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2012-11-14 10:09:33 +0000
commit    5f02d1749459f4a99d0818c888dc6b0fb780ef05 (patch)
tree      9ba608a0bed1c864b3b8aeedb8529aadd479608e /media
parent    cbab9b3530e7be88f438f454b85e1d9576ddefd9 (diff)
Port Unified audio class on Windows using the new Core Audio utility
BUG=none
TEST=media_unittests.exe --gtest_filter=CoreAudio*
TEST=out/Debug/media_unittests.exe --gtest_filter=WASAPIUni* --enable-webaudio-input

Manual tests of enable/disable of audio output devices. Tested
http://webaudiodemos.appspot.com/input/index.html in Chrome using the
--enable-webaudio-input flag.

Review URL: https://codereview.chromium.org/11340014

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167636 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r--media/audio/audio_util.cc14
-rw-r--r--media/audio/win/audio_device_listener_win.cc52
-rw-r--r--media/audio/win/audio_device_listener_win.h7
-rw-r--r--media/audio/win/audio_unified_win.cc381
-rw-r--r--media/audio/win/audio_unified_win.h44
-rw-r--r--media/audio/win/audio_unified_win_unittest.cc43
-rw-r--r--media/audio/win/core_audio_util_win.cc334
-rw-r--r--media/audio/win/core_audio_util_win.h87
-rw-r--r--media/audio/win/core_audio_util_win_unittest.cc195
9 files changed, 728 insertions, 429 deletions
diff --git a/media/audio/audio_util.cc b/media/audio/audio_util.cc
index 8f05410..ac43b70 100644
--- a/media/audio/audio_util.cc
+++ b/media/audio/audio_util.cc
@@ -32,7 +32,7 @@
#include "media/audio/audio_manager_base.h"
#include "media/audio/win/audio_low_latency_input_win.h"
#include "media/audio/win/audio_low_latency_output_win.h"
-#include "media/audio/win/audio_unified_win.h"
+#include "media/audio/win/core_audio_util_win.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
#endif
@@ -252,13 +252,13 @@ size_t GetAudioHardwareBufferSize() {
return 256;
}
- // TODO(henrika): remove when HardwareBufferSize() has been tested well
- // enough to be moved from WASAPIUnifiedStream to WASAPIAudioOutputStream.
+ // TODO(henrika): remove when the --enable-webaudio-input flag is no longer
+ // utilized.
if (cmd_line->HasSwitch(switches::kEnableWebAudioInput)) {
- int buffer_size = WASAPIUnifiedStream::HardwareBufferSize(eRender);
- // |buffer_size| can be zero if we use e.g. remote desktop or if all
- // audio devices are disabled.
- return (buffer_size > 0) ? buffer_size : kFallbackBufferSize;
+ AudioParameters params;
+ HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(eRender, eConsole,
+ &params);
+ return FAILED(hr) ? kFallbackBufferSize : params.frames_per_buffer();
}
// This call must be done on a COM thread configured as MTA.
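For reference, a minimal sketch of the buffer-size query this hunk switches to, assuming a COM-initialized (MTA) thread as the comment above requires; the helper name is illustrative and kFallbackBufferSize is the constant already used in this file:

    #include "media/audio/audio_parameters.h"
    #include "media/audio/win/core_audio_util_win.h"

    // Returns the render-side buffer size preferred by the audio engine, or
    // |fallback_size| if no active render device is available (e.g. remote
    // desktop, or all output devices disabled).
    static int PreferredRenderBufferSize(int fallback_size) {
      media::AudioParameters params;
      HRESULT hr = media::CoreAudioUtil::GetPreferredAudioParameters(
          eRender, eConsole, &params);
      return FAILED(hr) ? fallback_size : params.frames_per_buffer();
    }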
diff --git a/media/audio/win/audio_device_listener_win.cc b/media/audio/win/audio_device_listener_win.cc
index 588852e..9312af6 100644
--- a/media/audio/win/audio_device_listener_win.cc
+++ b/media/audio/win/audio_device_listener_win.cc
@@ -11,60 +11,46 @@
#include "base/win/scoped_co_mem.h"
#include "base/win/windows_version.h"
#include "media/audio/audio_util.h"
+#include "media/audio/win/core_audio_util_win.h"
using base::win::ScopedCoMem;
namespace media {
-// TODO(henrika): Move to CoreAudioUtil class.
-static ScopedComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator() {
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
- HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMMDeviceEnumerator),
- device_enumerator.ReceiveVoid());
- DLOG_IF(ERROR, FAILED(hr)) << "CoCreateInstance(IMMDeviceEnumerator): "
- << std::hex << hr;
- return device_enumerator;
-}
-
AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
: listener_cb_(listener_cb) {
- CHECK(media::IsWASAPISupported());
+ CHECK(CoreAudioUtil::IsSupported());
- device_enumerator_ = CreateDeviceEnumerator();
- if (!device_enumerator_)
- return;
+ ScopedComPtr<IMMDeviceEnumerator> device_enumerator(
+ CoreAudioUtil::CreateDeviceEnumerator());
+ if (!device_enumerator)
+ return;
- HRESULT hr = device_enumerator_->RegisterEndpointNotificationCallback(this);
+ HRESULT hr = device_enumerator->RegisterEndpointNotificationCallback(this);
if (FAILED(hr)) {
DLOG(ERROR) << "RegisterEndpointNotificationCallback failed: "
<< std::hex << hr;
- device_enumerator_ = NULL;
return;
}
- ScopedComPtr<IMMDevice> endpoint_render_device;
- hr = device_enumerator_->GetDefaultAudioEndpoint(
- eRender, eConsole, endpoint_render_device.Receive());
- // This will fail if there are no audio devices currently plugged in, so we
- // still want to keep our endpoint registered.
- if (FAILED(hr)) {
- DVLOG(1) << "GetDefaultAudioEndpoint() failed. No devices? Error: "
- << std::hex << hr;
+ device_enumerator_ = device_enumerator;
+
+ ScopedComPtr<IMMDevice> device =
+ CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
+ if (!device) {
+ // Most probable reason for ending up here is that all audio devices are
+ // disabled or unplugged.
+ DVLOG(1) << "CoreAudioUtil::CreateDefaultDevice failed. No device?";
return;
}
- ScopedCoMem<WCHAR> render_device_id;
- hr = endpoint_render_device->GetId(&render_device_id);
+ AudioDeviceName device_name;
+ hr = CoreAudioUtil::GetDeviceName(device, &device_name);
if (FAILED(hr)) {
- DLOG(ERROR) << "GetId() failed: " << std::hex << hr;
+ DVLOG(1) << "Failed to retrieve the device id: " << std::hex << hr;
return;
}
-
- default_render_device_id_ = WideToUTF8(static_cast<WCHAR*>(render_device_id));
- DVLOG(1) << "Default render device: " << default_render_device_id_;
+ default_render_device_id_ = device_name.unique_id;
}
AudioDeviceListenerWin::~AudioDeviceListenerWin() {
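A minimal usage sketch for the refactored listener, assuming a single COM-initialized thread as the class comment requires; OnDefaultDeviceChanged is an illustrative callback name:

    #include "base/bind.h"
    #include "base/win/scoped_com_initializer.h"
    #include "media/audio/win/audio_device_listener_win.h"

    // Illustrative callback; any base::Closure will do.
    static void OnDefaultDeviceChanged() {
      // React to the new default render device here.
    }

    static void ListenForDeviceChanges() {
      // Construction and destruction must happen on one COM-initialized thread.
      base::win::ScopedCOMInitializer com_init;
      media::AudioDeviceListenerWin listener(
          base::Bind(&OnDefaultDeviceChanged));
      // |listener| now forwards default output-device changes to the closure.
    }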
diff --git a/media/audio/win/audio_device_listener_win.h b/media/audio/win/audio_device_listener_win.h
index d2c38b3..6a31251 100644
--- a/media/audio/win/audio_device_listener_win.h
+++ b/media/audio/win/audio_device_listener_win.h
@@ -20,10 +20,9 @@ namespace media {
// IMMNotificationClient implementation for listening for default device changes
// and forwarding to AudioManagerWin so it can notify downstream clients. Only
-// output (eRender) device changes are supported currently. WASAPI support is
-// required to construct this object. Must be constructed and destructed on a
-// single COM initialized thread.
-// TODO(henrika): Refactor based on upcoming CoreAudioUtil class for windows.
+// output (eRender) device changes are supported currently. Core Audio support
+// is required to construct this object. Must be constructed and destructed on
+// a single COM initialized thread.
// TODO(dalecurtis, henrika): Support input device changes.
class MEDIA_EXPORT AudioDeviceListenerWin : public IMMNotificationClient {
public:
diff --git a/media/audio/win/audio_unified_win.cc b/media/audio/win/audio_unified_win.cc
index aa49f88..07f43db 100644
--- a/media/audio/win/audio_unified_win.cc
+++ b/media/audio/win/audio_unified_win.cc
@@ -10,6 +10,7 @@
#include "base/win/scoped_com_initializer.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
+#include "media/audio/win/core_audio_util_win.h"
using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;
@@ -17,150 +18,24 @@ using base::win::ScopedCoMem;
namespace media {
-static HRESULT GetMixFormat(EDataFlow data_flow, WAVEFORMATEX** device_format) {
- // It is assumed that this static method is called from a COM thread, i.e.,
- // CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
- ScopedComPtr<IMMDeviceEnumerator> enumerator;
- HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMMDeviceEnumerator),
- enumerator.ReceiveVoid());
- if (FAILED(hr)) {
- NOTREACHED() << "error code: " << std::hex << hr;
- return hr;
- }
-
- ScopedComPtr<IMMDevice> endpoint_device;
- hr = enumerator->GetDefaultAudioEndpoint(data_flow,
- eConsole,
- endpoint_device.Receive());
- if (FAILED(hr)) {
- LOG(WARNING) << "No audio end point: " << std::hex << hr;
- return hr;
- }
-
- ScopedComPtr<IAudioClient> audio_client;
- hr = endpoint_device->Activate(__uuidof(IAudioClient),
- CLSCTX_INPROC_SERVER,
- NULL,
- audio_client.ReceiveVoid());
- DCHECK(SUCCEEDED(hr)) << "Failed to activate device: " << std::hex << hr;
- if (SUCCEEDED(hr)) {
- // Retrieve the stream format that the audio engine uses for its internal
- // processing/mixing of shared-mode streams.
- hr = audio_client->GetMixFormat(device_format);
- DCHECK(SUCCEEDED(hr)) << "GetMixFormat: " << std::hex << hr;
- }
-
- return hr;
-}
-
-static ScopedComPtr<IMMDevice> CreateDefaultAudioDevice(EDataFlow data_flow) {
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
- ScopedComPtr<IMMDevice> endpoint_device;
-
- // Create the IMMDeviceEnumerator interface.
- HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMMDeviceEnumerator),
- device_enumerator.ReceiveVoid());
- if (SUCCEEDED(hr)) {
- // Retrieve the default render audio endpoint for the specified role.
- hr = device_enumerator->GetDefaultAudioEndpoint(
- data_flow, eConsole, endpoint_device.Receive());
-
- if (FAILED(hr)) {
- PLOG(ERROR) << "GetDefaultAudioEndpoint: " << std::hex << hr;
- return endpoint_device;
- }
-
- // Verify that the audio endpoint device is active. That is, the audio
- // adapter that connects to the endpoint device is present and enabled.
- DWORD state = DEVICE_STATE_DISABLED;
- hr = endpoint_device->GetState(&state);
- if (SUCCEEDED(hr)) {
- if (!(state & DEVICE_STATE_ACTIVE)) {
- PLOG(ERROR) << "Selected render device is not active.";
- endpoint_device.Release();
- }
- }
- }
-
- return endpoint_device;
-}
-
-static ScopedComPtr<IAudioClient> CreateAudioClient(IMMDevice* audio_device) {
- ScopedComPtr<IAudioClient> audio_client;
-
- // Creates and activates an IAudioClient COM object given the selected
- // endpoint device.
- HRESULT hr = audio_device->Activate(__uuidof(IAudioClient),
- CLSCTX_INPROC_SERVER,
- NULL,
- audio_client.ReceiveVoid());
- PLOG_IF(ERROR, FAILED(hr)) << "IMMDevice::Activate: " << std::hex << hr;
- return audio_client;
-}
-
-static bool IsFormatSupported(IAudioClient* audio_client,
- WAVEFORMATPCMEX* format) {
- ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
- HRESULT hr = audio_client->IsFormatSupported(
- AUDCLNT_SHAREMODE_SHARED, reinterpret_cast<WAVEFORMATEX*>(format),
- reinterpret_cast<WAVEFORMATEX**>(&closest_match));
-
- // This log can only be triggered for shared mode.
- DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
- << "but a closest match exists.";
- // This log can be triggered both for shared and exclusive modes.
- DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
- if (hr == S_FALSE) {
- DVLOG(1) << "wFormatTag : " << closest_match->Format.wFormatTag;
- DVLOG(1) << "nChannels : " << closest_match->Format.nChannels;
- DVLOG(1) << "nSamplesPerSec: " << closest_match->Format.nSamplesPerSec;
- DVLOG(1) << "wBitsPerSample: " << closest_match->Format.wBitsPerSample;
- }
-
- return (hr == S_OK);
-}
-
-// Get the default scheduling period for a shared-mode stream in a specified
-// direction. Note that the period between processing passes by the audio
-// engine is fixed for a particular audio endpoint device and represents the
-// smallest processing quantum for the audio engine.
-static REFERENCE_TIME GetAudioEngineDevicePeriod(EDataFlow data_flow) {
- ScopedComPtr<IMMDevice> endpoint_device = CreateDefaultAudioDevice(data_flow);
- if (!endpoint_device)
- return 0;
-
- ScopedComPtr<IAudioClient> audio_client;
- audio_client = CreateAudioClient(endpoint_device);
- if (!audio_client)
- return 0;
-
- REFERENCE_TIME default_device_period = 0;
- REFERENCE_TIME minimum_device_period = 0;
-
- // Times are expressed in 100-nanosecond units.
- HRESULT hr = audio_client->GetDevicePeriod(&default_device_period,
- &minimum_device_period);
- if (SUCCEEDED(hr)) {
- std::string flow = (data_flow == eCapture) ? "[in] " : "[out] ";
- DVLOG(1) << flow << "default_device_period: " << default_device_period;
- DVLOG(1) << flow << "minimum_device_period: " << minimum_device_period;
-
- return default_device_period;
- }
-
- return 0;
+// Compare two sets of audio parameters and return true if they are equal.
+// Note that bits_per_sample() is excluded from this comparison since Core
+// Audio can deal with most bit depths. As an example, if the native/mixing
+// bit depth is 32 bits (default), opening at 16 or 24 still works fine and
+// the audio engine will do the required conversion for us.
+static bool CompareAudioParameters(const AudioParameters& a,
+ const AudioParameters& b) {
+ return (a.format() == b.format() &&
+ a.channels() == b.channels() &&
+ a.sample_rate() == b.sample_rate() &&
+ a.frames_per_buffer() == b.frames_per_buffer());
}
WASAPIUnifiedStream::WASAPIUnifiedStream(AudioManagerWin* manager,
const AudioParameters& params)
: creating_thread_id_(base::PlatformThread::CurrentId()),
manager_(manager),
+ share_mode_(CoreAudioUtil::GetShareMode()),
audio_io_thread_(NULL),
opened_(false),
endpoint_render_buffer_size_frames_(0),
@@ -170,8 +45,19 @@ WASAPIUnifiedStream::WASAPIUnifiedStream(AudioManagerWin* manager,
render_bus_(AudioBus::Create(params)) {
DCHECK(manager_);
- LOG_IF(ERROR, !HasUnifiedDefaultIO())
- << "Unified audio I/O is not supported.";
+ DVLOG_IF(1, !HasUnifiedDefaultIO()) << "Unified audio I/O is not supported.";
+ DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
+ << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
+
+#if !defined(NDEBUG)
+  // Log a message if the input parameters are not identical to the preferred
+  // parameters.
+ AudioParameters mix_params;
+ HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
+ eRender, eConsole, &mix_params);
+ DVLOG_IF(1, SUCCEEDED(hr) && !CompareAudioParameters(params, mix_params)) <<
+ "Input and preferred parameters are not identical.";
+#endif
// Load the Avrt DLL if not already loaded. Required to support MMCSS.
bool avrt_init = avrt::Initialize();
@@ -224,53 +110,61 @@ bool WASAPIUnifiedStream::Open() {
}
// Render side:
- // IMMDeviceEnumerator -> IMMDevice
- // IMMDevice -> IAudioClient
- // IAudioClient -> IAudioRenderClient
-
- ScopedComPtr<IMMDevice> render_device = CreateDefaultAudioDevice(eRender);
- if (!render_device)
- return false;
ScopedComPtr<IAudioClient> audio_output_client =
- CreateAudioClient(render_device);
+ CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
if (!audio_output_client)
return false;
- if (!IsFormatSupported(audio_output_client, &format_))
+ if (!CoreAudioUtil::IsFormatSupported(audio_output_client,
+ share_mode_,
+ &format_)) {
+ return false;
+ }
+
+ HRESULT hr = S_FALSE;
+ if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+ hr = CoreAudioUtil::SharedModeInitialize(
+ audio_output_client, &format_, NULL,
+ &endpoint_render_buffer_size_frames_);
+ } else {
+ // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
+ }
+ if (FAILED(hr))
return false;
ScopedComPtr<IAudioRenderClient> audio_render_client =
- CreateAudioRenderClient(audio_output_client);
+ CoreAudioUtil::CreateRenderClient(audio_output_client);
if (!audio_render_client)
return false;
// Capture side:
- // IMMDeviceEnumerator -> IMMDevice
- // IMMDevice -> IAudioClient
- // IAudioClient -> IAudioCaptureClient
-
- ScopedComPtr<IMMDevice> capture_device = CreateDefaultAudioDevice(eCapture);
- if (!capture_device)
- return false;
ScopedComPtr<IAudioClient> audio_input_client =
- CreateAudioClient(capture_device);
+ CoreAudioUtil::CreateDefaultClient(eCapture, eConsole);
if (!audio_input_client)
return false;
- if (!IsFormatSupported(audio_input_client, &format_))
+ if (!CoreAudioUtil::IsFormatSupported(audio_input_client,
+ share_mode_,
+ &format_)) {
return false;
+ }
- ScopedComPtr<IAudioCaptureClient> audio_capture_client =
- CreateAudioCaptureClient(audio_input_client);
- if (!audio_capture_client)
+ if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+ // Include valid event handle for event-driven initialization.
+ hr = CoreAudioUtil::SharedModeInitialize(
+ audio_input_client, &format_, capture_event_.Get(),
+ &endpoint_capture_buffer_size_frames_);
+ } else {
+ // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
+ }
+ if (FAILED(hr))
return false;
- // Set the event handle that the audio engine will signal each time
- // a buffer becomes ready to be processed by the client.
- HRESULT hr = audio_input_client->SetEventHandle(capture_event_.Get());
- if (FAILED(hr))
+ ScopedComPtr<IAudioCaptureClient> audio_capture_client =
+ CoreAudioUtil::CreateCaptureClient(audio_input_client);
+ if (!audio_capture_client)
return false;
// Store all valid COM interfaces.
@@ -295,26 +189,6 @@ void WASAPIUnifiedStream::Start(AudioSourceCallback* callback) {
source_ = callback;
- // Avoid start-up glitches by filling up the endpoint buffer with "silence"
- // before starting the stream.
- BYTE* data_ptr = NULL;
- HRESULT hr = audio_render_client_->GetBuffer(
- endpoint_render_buffer_size_frames_, &data_ptr);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to use rendering audio buffer: " << std::hex << hr;
- return;
- }
-
- // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
- // explicitly write silence data to the rendering buffer.
- audio_render_client_->ReleaseBuffer(endpoint_render_buffer_size_frames_,
- AUDCLNT_BUFFERFLAGS_SILENT);
-
- // Sanity check: verify that the endpoint buffer is filled with silence.
- UINT32 num_queued_frames = 0;
- audio_output_client_->GetCurrentPadding(&num_queued_frames);
- DCHECK(num_queued_frames == endpoint_render_buffer_size_frames_);
-
// Create and start the thread that will drive capturing and rendering.
audio_io_thread_.reset(
new base::DelegateSimpleThread(this, "wasapi_io_thread"));
@@ -326,7 +200,7 @@ void WASAPIUnifiedStream::Start(AudioSourceCallback* callback) {
// Start input streaming data between the endpoint buffer and the audio
// engine.
- hr = audio_input_client_->Start();
+ HRESULT hr = audio_input_client_->Start();
if (FAILED(hr)) {
StopAndJoinThread(hr);
return;
@@ -388,7 +262,7 @@ void WASAPIUnifiedStream::Stop() {
// Extra safety check to ensure that the buffers are cleared.
// If the buffers are not cleared correctly, the next call to Start()
// would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
- // This check is is only needed for shared-mode streams.
+ // TODO(henrika): this check is only needed for shared-mode streams.
UINT32 num_queued_frames = 0;
audio_output_client_->GetCurrentPadding(&num_queued_frames);
DCHECK_EQ(0u, num_queued_frames);
@@ -416,61 +290,19 @@ void WASAPIUnifiedStream::GetVolume(double* volume) {
// static
bool WASAPIUnifiedStream::HasUnifiedDefaultIO() {
- int output_size = HardwareBufferSize(eRender);
- int input_size = HardwareBufferSize(eCapture);
- int output_channels = HardwareChannelCount(eRender);
- int input_channels = HardwareChannelCount(eCapture);
- return ((output_size == input_size) && (output_channels == input_channels));
-}
-
-// static
-int WASAPIUnifiedStream::HardwareChannelCount(EDataFlow data_flow) {
- base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
- HRESULT hr = GetMixFormat(
- data_flow, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
+ AudioParameters in_params;
+ HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(eCapture, eConsole,
+ &in_params);
if (FAILED(hr))
- return 0;
-
- // Number of channels in the stream. Corresponds to the number of bits
- // set in the dwChannelMask.
- std::string flow = (data_flow == eCapture) ? "[in] " : "[out] ";
- DVLOG(1) << flow << "endpoint channels: "
- << format_ex->Format.nChannels;
-
- return static_cast<int>(format_ex->Format.nChannels);
-}
+ return false;
-// static
-int WASAPIUnifiedStream::HardwareSampleRate(EDataFlow data_flow) {
- base::win::ScopedCoMem<WAVEFORMATEX> format;
- HRESULT hr = GetMixFormat(data_flow, &format);
+ AudioParameters out_params;
+ hr = CoreAudioUtil::GetPreferredAudioParameters(eRender, eConsole,
+ &out_params);
if (FAILED(hr))
- return 0;
-
- std::string flow = (data_flow == eCapture) ? "[in] " : "[out] ";
- DVLOG(1) << flow << "nSamplesPerSec: " << format->nSamplesPerSec;
- return static_cast<int>(format->nSamplesPerSec);
-}
+ return false;
-// static
-int WASAPIUnifiedStream::HardwareBufferSize(EDataFlow data_flow) {
- int sample_rate = HardwareSampleRate(data_flow);
- if (sample_rate == 0)
- return 0;
-
- // Number of 100-nanosecond units per second.
- const float kRefTimesPerSec = 10000000.0f;
-
- // A typical value of |device_period| is 100000 which corresponds to
- // 0.01 seconds or 10 milliseconds. Given a sample rate of 48000 Hz,
- // this device period results in a |buffer_size| of 480 audio frames.
- REFERENCE_TIME device_period = GetAudioEngineDevicePeriod(data_flow);
- int buffer_size = static_cast<int>(
- ((sample_rate * device_period) / kRefTimesPerSec) + 0.5);
- std::string flow = (data_flow == eCapture) ? "[in] " : "[out] ";
- DVLOG(1) << flow << "buffer size: " << buffer_size;
-
- return buffer_size;
+ return CompareAudioParameters(in_params, out_params);
}
void WASAPIUnifiedStream::Run() {
@@ -634,75 +466,4 @@ void WASAPIUnifiedStream::StopAndJoinThread(HRESULT err) {
HandleError(err);
}
-ScopedComPtr<IAudioRenderClient> WASAPIUnifiedStream::CreateAudioRenderClient(
- IAudioClient* audio_client) {
- ScopedComPtr<IAudioRenderClient> audio_render_client;
- HRESULT hr = S_FALSE;
-
- // Initialize the audio stream between the client and the device in shared
- // push mode (will not signal an event).
- hr = audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED,
- AUDCLNT_STREAMFLAGS_NOPERSIST,
- 0,
- 0,
- reinterpret_cast<WAVEFORMATEX*>(&format_),
- NULL);
- if (FAILED(hr)) {
- LOG(WARNING) << "IAudioClient::Initialize() failed: " << std::hex << hr;
- return audio_render_client;
- }
-
- // Retrieve the length of the render endpoint buffer shared between the
- // client and the audio engine.
- hr = audio_client->GetBufferSize(&endpoint_render_buffer_size_frames_);
- if (FAILED(hr))
- return audio_render_client;
- DVLOG(1) << "render endpoint buffer size: "
- << endpoint_render_buffer_size_frames_ << " [frames]";
-
- // Get access to the IAudioRenderClient interface. This interface
- // enables us to write output data to a rendering endpoint buffer.
- hr = audio_client->GetService(__uuidof(IAudioRenderClient),
- audio_render_client.ReceiveVoid());
- if (FAILED(hr)) {
- LOG(WARNING) << "IAudioClient::GetService() failed: " << std::hex << hr;
- return audio_render_client;
- }
-
- return audio_render_client;
-}
-
-ScopedComPtr<IAudioCaptureClient>
-WASAPIUnifiedStream::CreateAudioCaptureClient(IAudioClient* audio_client) {
- ScopedComPtr<IAudioCaptureClient> audio_capture_client;
- HRESULT hr = S_FALSE;
-
- // Use event driven audio-buffer processing, i.e, the audio engine will
- // inform us (by signaling an event) when data has been recorded.
- hr = audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED,
- AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
- AUDCLNT_STREAMFLAGS_NOPERSIST,
- 0,
- 0,
- reinterpret_cast<WAVEFORMATEX*>(&format_),
- NULL);
- if (FAILED(hr))
- return audio_capture_client;
-
- // Retrieve the length of the capture endpoint buffer shared between the
- // client and the audio engine.
- hr = audio_client->GetBufferSize(&endpoint_capture_buffer_size_frames_);
- if (FAILED(hr))
- return audio_capture_client;
- DVLOG(1) << "capture endpoint buffer size: "
- << endpoint_capture_buffer_size_frames_ << " [frames]";
-
- // Get access to the IAudioCaptureClient interface. This interface
- // enables us to read input data from the capture endpoint buffer.
- hr = audio_client->GetService(__uuidof(IAudioCaptureClient),
- audio_capture_client.ReceiveVoid());
-
- return audio_capture_client;
-}
-
} // namespace media
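A minimal sketch of the precondition check the header recommends before creating a unified stream, assuming a COM-initialized thread; the helper name is illustrative:

    #include "media/audio/win/audio_unified_win.h"
    #include "media/audio/win/core_audio_util_win.h"

    // Unified I/O requires Core Audio plus matching preferred parameters for
    // the default input and output devices.
    static bool CanUseUnifiedStream() {
      return media::CoreAudioUtil::IsSupported() &&
             media::WASAPIUnifiedStream::HasUnifiedDefaultIO();
    }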
diff --git a/media/audio/win/audio_unified_win.h b/media/audio/win/audio_unified_win.h
index 3c1e281..4bfe3d7 100644
--- a/media/audio/win/audio_unified_win.h
+++ b/media/audio/win/audio_unified_win.h
@@ -25,19 +25,19 @@ namespace media {
class AudioManagerWin;
-// Implementation of AudioOutputStream for Windows using the WASAPI Core
-// Audio interface where both capturing and rendering takes place on the
-// same thread to enable audio I/O.
+// Implementation of AudioOutputStream for Windows using the Core Audio API
+// where both capturing and rendering takes place on the same thread to enable
+// audio I/O.
//
-// Best performance is achieved by using a buffer size given by the static
-// HardwareBufferSize() method. The user should also ensure that audio I/O
-// is supported by calling HasUnifiedDefaultIO().
+// The user should also ensure that audio I/O is supported by calling
+// HasUnifiedDefaultIO().
//
// Implementation notes:
//
// - Certain conditions must be fulfilled to support audio I/O:
// o Both capture and render side must use the same sample rate.
// o Both capture and render side must use the same channel count.
+// o Both capture and render side must use the same channel configuration.
// o See HasUnifiedDefaultIO() for more details.
//
// TODO(henrika):
@@ -73,26 +73,6 @@ class MEDIA_EXPORT WASAPIUnifiedStream
// channel count.
static bool HasUnifiedDefaultIO();
- // Retrieves the number of channels the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device
- // and in the given direction.
- static int HardwareChannelCount(EDataFlow data_flow);
-
- // Retrieves the sample rate the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device
- // and in the given direction.
- static int HardwareSampleRate(EDataFlow data_flow);
-
- // Retrieves the preferred buffer size for the default endpoint device and
- // in the given direction. The recommended size is given by the mixing
- // sample rate and the native device period for the audio device.
- // Unit is in number of audio frames.
- // Examples:
- // fs = 96000 Hz => 960
- // fs = 48000 Hz => 480
- // fs = 44100 Hz => 441 or 448 (depends on the audio hardware)
- static int HardwareBufferSize(EDataFlow data_flow);
-
bool started() const {
return audio_io_thread_.get() != NULL;
}
@@ -107,13 +87,6 @@ class MEDIA_EXPORT WASAPIUnifiedStream
// Stops and joins the audio thread in case of an error.
void StopAndJoinThread(HRESULT err);
- // Helper methods which uses an IAudioClient to create and setup
- // IAudio[Render|Capture]Clients.
- base::win::ScopedComPtr<IAudioRenderClient> CreateAudioRenderClient(
- IAudioClient* audio_client);
- base::win::ScopedComPtr<IAudioCaptureClient> CreateAudioCaptureClient(
- IAudioClient* audio_client);
-
// Converts unique endpoint ID to user-friendly device name.
std::string GetDeviceName(LPCWSTR device_id) const;
@@ -127,6 +100,11 @@ class MEDIA_EXPORT WASAPIUnifiedStream
// Our creator, the audio manager needs to be notified when we close.
AudioManagerWin* manager_;
+ // The sharing mode for the streams.
+ // Valid values are AUDCLNT_SHAREMODE_SHARED and AUDCLNT_SHAREMODE_EXCLUSIVE
+ // where AUDCLNT_SHAREMODE_SHARED is the default.
+ AUDCLNT_SHAREMODE share_mode_;
+
// Rendering and capturing is driven by this thread (no message loop).
// All OnMoreIOData() callbacks will be called from this thread.
scoped_ptr<base::DelegateSimpleThread> audio_io_thread_;
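As a side note, a small sketch of how the new |share_mode_| member is derived; exclusive mode is selected only when the process runs with the --enable-exclusive-audio flag, otherwise shared mode is the default. The helper name is illustrative:

    #include <Audioclient.h>

    #include "media/audio/win/core_audio_util_win.h"

    static bool IsExclusiveStreamingEnabled() {
      return media::CoreAudioUtil::GetShareMode() ==
             AUDCLNT_SHAREMODE_EXCLUSIVE;
    }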
diff --git a/media/audio/win/audio_unified_win_unittest.cc b/media/audio/win/audio_unified_win_unittest.cc
index d252daf..7cb0350 100644
--- a/media/audio/win/audio_unified_win_unittest.cc
+++ b/media/audio/win/audio_unified_win_unittest.cc
@@ -14,6 +14,7 @@
#include "media/audio/audio_manager.h"
#include "media/audio/audio_util.h"
#include "media/audio/win/audio_unified_win.h"
+#include "media/audio/win/core_audio_util_win.h"
#include "media/base/media_switches.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -122,7 +123,7 @@ static bool CanRunUnifiedAudioTests(AudioManager* audio_man) {
return false;
}
- if (!media::IsWASAPISupported()) {
+ if (!CoreAudioUtil::IsSupported()) {
LOG(WARNING) << "This tests requires Windows Vista or higher.";
return false;
}
@@ -151,12 +152,13 @@ class AudioUnifiedStreamWrapper {
public:
explicit AudioUnifiedStreamWrapper(AudioManager* audio_manager)
: com_init_(ScopedCOMInitializer::kMTA),
- audio_man_(audio_manager),
- format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
- channel_layout_(CHANNEL_LAYOUT_STEREO),
- bits_per_sample_(16) {
- sample_rate_ = media::GetAudioHardwareSampleRate();
- samples_per_packet_ = media::GetAudioHardwareBufferSize();
+ audio_man_(audio_manager) {
+ // We open up both sides (input and output) using the preferred
+ // set of audio parameters. These parameters correspond to the mix format
+ // that the audio engine uses internally for processing of shared-mode
+ // output streams.
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
+ eRender, eConsole, &params_)));
}
~AudioUnifiedStreamWrapper() {}
@@ -166,29 +168,22 @@ class AudioUnifiedStreamWrapper {
return static_cast<WASAPIUnifiedStream*> (CreateOutputStream());
}
- AudioParameters::Format format() const { return format_; }
- int channels() const { return ChannelLayoutToChannelCount(channel_layout_); }
- int bits_per_sample() const { return bits_per_sample_; }
- int sample_rate() const { return sample_rate_; }
- int samples_per_packet() const { return samples_per_packet_; }
+ AudioParameters::Format format() const { return params_.format(); }
+ int channels() const { return params_.channels(); }
+ int bits_per_sample() const { return params_.bits_per_sample(); }
+ int sample_rate() const { return params_.sample_rate(); }
+ int frames_per_buffer() const { return params_.frames_per_buffer(); }
private:
AudioOutputStream* CreateOutputStream() {
- AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(
- AudioParameters(format_, channel_layout_, sample_rate_,
- bits_per_sample_, samples_per_packet_));
+ AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_);
EXPECT_TRUE(aos);
return aos;
}
ScopedCOMInitializer com_init_;
AudioManager* audio_man_;
-
- AudioParameters::Format format_;
- ChannelLayout channel_layout_;
- int bits_per_sample_;
- int sample_rate_;
- int samples_per_packet_;
+ AudioParameters params_;
};
// Convenience method which creates a default WASAPIUnifiedStream object.
@@ -224,7 +219,7 @@ TEST(WASAPIUnifiedStreamTest, OpenStartAndClose) {
.Times(0);
EXPECT_CALL(source, OnMoreIOData(NotNull(), NotNull(), _))
.Times(Between(0, 1))
- .WillOnce(Return(ausw.samples_per_packet()));
+ .WillOnce(Return(ausw.frames_per_buffer()));
wus->Start(&source);
wus->Close();
}
@@ -245,10 +240,10 @@ TEST(WASAPIUnifiedStreamTest, StartLoopbackAudio) {
.Times(0);
EXPECT_CALL(source, OnMoreIOData(NotNull(), NotNull(), _))
.Times(AtLeast(2))
- .WillOnce(Return(ausw.samples_per_packet()))
+ .WillOnce(Return(ausw.frames_per_buffer()))
.WillOnce(DoAll(
QuitLoop(loop.message_loop_proxy()),
- Return(ausw.samples_per_packet())));
+ Return(ausw.frames_per_buffer())));
wus->Start(&source);
loop.PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
TestTimeouts::action_timeout());
diff --git a/media/audio/win/core_audio_util_win.cc b/media/audio/win/core_audio_util_win.cc
index 0078447..e160f74 100644
--- a/media/audio/win/core_audio_util_win.cc
+++ b/media/audio/win/core_audio_util_win.cc
@@ -7,16 +7,76 @@
#include <Audioclient.h>
#include <Functiondiscoverykeys_devpkey.h>
+#include "base/command_line.h"
#include "base/logging.h"
#include "base/stringprintf.h"
#include "base/utf_string_conversions.h"
#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_handle.h"
#include "base/win/windows_version.h"
+#include "media/base/media_switches.h"
+#include "base/time.h"
using base::win::ScopedCoMem;
+using base::win::ScopedHandle;
namespace media {
+typedef uint32 ChannelConfig;
+
+// Converts Microsoft's channel configuration to ChannelLayout.
+// This mapping is not perfect but the best we can do given the current
+// ChannelLayout enumerator and the Windows-specific speaker configurations
+// defined in ksmedia.h. Don't assume that the channel ordering in
+// ChannelLayout is exactly the same as the Windows specific configuration.
+// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
+// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
+// speakers are different in these two definitions.
+static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
+ switch (config) {
+ case KSAUDIO_SPEAKER_DIRECTOUT:
+ DVLOG(2) << "KSAUDIO_SPEAKER_DIRECTOUT=>CHANNEL_LAYOUT_NONE";
+ return CHANNEL_LAYOUT_NONE;
+ case KSAUDIO_SPEAKER_MONO:
+ DVLOG(2) << "KSAUDIO_SPEAKER_MONO=>CHANNEL_LAYOUT_MONO";
+ return CHANNEL_LAYOUT_MONO;
+ case KSAUDIO_SPEAKER_STEREO:
+ DVLOG(2) << "KSAUDIO_SPEAKER_STEREO=>CHANNEL_LAYOUT_STEREO";
+ return CHANNEL_LAYOUT_STEREO;
+ case KSAUDIO_SPEAKER_QUAD:
+ DVLOG(2) << "KSAUDIO_SPEAKER_QUAD=>CHANNEL_LAYOUT_QUAD";
+ return CHANNEL_LAYOUT_QUAD;
+ case KSAUDIO_SPEAKER_SURROUND:
+ DVLOG(2) << "KSAUDIO_SPEAKER_SURROUND=>CHANNEL_LAYOUT_4_0";
+ return CHANNEL_LAYOUT_4_0;
+ case KSAUDIO_SPEAKER_5POINT1:
+ DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1=>CHANNEL_LAYOUT_5_1_BACK";
+ return CHANNEL_LAYOUT_5_1_BACK;
+ case KSAUDIO_SPEAKER_5POINT1_SURROUND:
+ DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1_SURROUND=>CHANNEL_LAYOUT_5_1";
+ return CHANNEL_LAYOUT_5_1;
+ case KSAUDIO_SPEAKER_7POINT1:
+ DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1=>CHANNEL_LAYOUT_7_1_WIDE";
+ return CHANNEL_LAYOUT_7_1_WIDE;
+ case KSAUDIO_SPEAKER_7POINT1_SURROUND:
+ DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1_SURROUND=>CHANNEL_LAYOUT_7_1";
+ return CHANNEL_LAYOUT_7_1;
+ default:
+ DVLOG(2) << "Unsupported channel layout: " << config;
+ return CHANNEL_LAYOUT_UNSUPPORTED;
+ }
+}
+
+static double ReferenceTimeToMilliseconds(REFERENCE_TIME time) {
+ // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
+ return base::TimeDelta::FromMicroseconds(0.1 * time + 0.5).InMillisecondsF();
+}
+
+static double ReferenceTimeToSeconds(REFERENCE_TIME time) {
+ // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
+ return base::TimeDelta::FromMicroseconds(0.1 * time + 0.5).InSecondsF();
+}
+
// Scoped PROPVARIANT class for automatically freeing a COM PROPVARIANT
// structure at the end of a scope.
class ScopedPropertyVariant {
@@ -57,6 +117,13 @@ bool CoreAudioUtil::IsSupported() {
return (base::win::GetVersion() >= base::win::VERSION_VISTA);
}
+AUDCLNT_SHAREMODE CoreAudioUtil::GetShareMode() {
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
+ return AUDCLNT_SHAREMODE_EXCLUSIVE;
+ return AUDCLNT_SHAREMODE_SHARED;
+}
+
int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
DCHECK(CoreAudioUtil::IsSupported());
// Create the IMMDeviceEnumerator interface.
@@ -80,7 +147,7 @@ int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
// Retrieve the number of active audio devices for the specified direction
UINT number_of_active_devices = 0;
collection->GetCount(&number_of_active_devices);
- DVLOG(1) << ((data_flow == eCapture) ? "[in ] " : "[out] ")
+ DVLOG(2) << ((data_flow == eCapture) ? "[in ] " : "[out] ")
<< "number of devices: " << number_of_active_devices;
return static_cast<int>(number_of_active_devices);
}
@@ -156,11 +223,10 @@ ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
DCHECK(CoreAudioUtil::IsSupported());
- DCHECK(device);
- AudioDeviceName device_name;
// Retrieve unique name of endpoint device.
// Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
+ AudioDeviceName device_name;
ScopedCoMem<WCHAR> endpoint_device_id;
HRESULT hr = device->GetId(&endpoint_device_id);
if (FAILED(hr))
@@ -185,14 +251,13 @@ HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
}
*name = device_name;
- DVLOG(1) << "friendly name: " << device_name.device_name;
- DVLOG(1) << "unique id : " << device_name.unique_id;
+ DVLOG(2) << "friendly name: " << device_name.device_name;
+ DVLOG(2) << "unique id : " << device_name.unique_id;
return hr;
}
std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
DCHECK(CoreAudioUtil::IsSupported());
-
ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
if (!audio_device)
return std::string();
@@ -227,8 +292,6 @@ bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
DCHECK(CoreAudioUtil::IsSupported());
- DCHECK(device);
-
ScopedComPtr<IMMEndpoint> endpoint;
HRESULT hr = device->QueryInterface(endpoint.Receive());
if (FAILED(hr)) {
@@ -248,10 +311,10 @@ EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
IMMDevice* audio_device) {
DCHECK(CoreAudioUtil::IsSupported());
- ScopedComPtr<IAudioClient> audio_client;
// Creates and activates an IAudioClient COM object given the selected
// endpoint device.
+ ScopedComPtr<IAudioClient> audio_client;
HRESULT hr = audio_device->Activate(__uuidof(IAudioClient),
CLSCTX_INPROC_SERVER,
NULL,
@@ -260,4 +323,257 @@ ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
return audio_client;
}
+ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient(
+ EDataFlow data_flow, ERole role) {
+ DCHECK(CoreAudioUtil::IsSupported());
+ ScopedComPtr<IMMDevice> default_device(CreateDefaultDevice(data_flow, role));
+ return (default_device ? CreateClient(default_device) :
+ ScopedComPtr<IAudioClient>());
+}
+
+HRESULT CoreAudioUtil::GetSharedModeMixFormat(
+ IAudioClient* client, WAVEFORMATPCMEX* format) {
+ DCHECK(CoreAudioUtil::IsSupported());
+ ScopedCoMem<WAVEFORMATPCMEX> format_pcmex;
+ HRESULT hr = client->GetMixFormat(
+ reinterpret_cast<WAVEFORMATEX**>(&format_pcmex));
+ if (FAILED(hr))
+ return hr;
+
+ size_t bytes = sizeof(WAVEFORMATEX) + format_pcmex->Format.cbSize;
+ DCHECK_EQ(bytes, sizeof(WAVEFORMATPCMEX));
+
+ memcpy(format, format_pcmex, bytes);
+
+ DVLOG(2) << "wFormatTag: 0x" << std::hex << format->Format.wFormatTag
+ << ", nChannels: " << std::dec << format->Format.nChannels
+ << ", nSamplesPerSec: " << format->Format.nSamplesPerSec
+ << ", nAvgBytesPerSec: " << format->Format.nAvgBytesPerSec
+ << ", nBlockAlign: " << format->Format.nBlockAlign
+ << ", wBitsPerSample: " << format->Format.wBitsPerSample
+ << ", cbSize: " << format->Format.cbSize
+ << ", wValidBitsPerSample: " << format->Samples.wValidBitsPerSample
+ << ", dwChannelMask: 0x" << std::hex << format->dwChannelMask;
+
+ return hr;
+}
+
+bool CoreAudioUtil::IsFormatSupported(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ const WAVEFORMATPCMEX* format) {
+ DCHECK(CoreAudioUtil::IsSupported());
+ ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
+ HRESULT hr = client->IsFormatSupported(
+ share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
+ reinterpret_cast<WAVEFORMATEX**>(&closest_match));
+
+ // This log can only be triggered for shared mode.
+ DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
+ << "but a closest match exists.";
+ // This log can be triggered both for shared and exclusive modes.
+ DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
+ if (hr == S_FALSE) {
+ DVLOG(2) << "wFormatTag: " << closest_match->Format.wFormatTag
+ << ", nChannels: " << closest_match->Format.nChannels
+ << ", nSamplesPerSec: " << closest_match->Format.nSamplesPerSec
+ << ", wBitsPerSample: " << closest_match->Format.wBitsPerSample;
+ }
+
+ return (hr == S_OK);
+}
+
+HRESULT CoreAudioUtil::GetDevicePeriod(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ REFERENCE_TIME* device_period) {
+ DCHECK(CoreAudioUtil::IsSupported());
+
+ // Get the period of the engine thread.
+ REFERENCE_TIME default_period = 0;
+ REFERENCE_TIME minimum_period = 0;
+ HRESULT hr = client->GetDevicePeriod(&default_period, &minimum_period);
+ if (FAILED(hr))
+ return hr;
+
+ *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period :
+ minimum_period;
+ DVLOG(2) << "device_period: "
+ << ReferenceTimeToMilliseconds(*device_period) << " [ms]";
+ return hr;
+}
+
+HRESULT CoreAudioUtil::GetPreferredAudioParameters(
+ IAudioClient* client, AudioParameters* params) {
+ DCHECK(CoreAudioUtil::IsSupported());
+ WAVEFORMATPCMEX format;
+ HRESULT hr = GetSharedModeMixFormat(client, &format);
+ if (FAILED(hr))
+ return hr;
+
+ REFERENCE_TIME default_period = 0;
+ hr = GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED, &default_period);
+ if (FAILED(hr))
+ return hr;
+
+ // Get the integer mask which corresponds to the channel layout the
+ // audio engine uses for its internal processing/mixing of shared-mode
+ // streams. This mask indicates which channels are present in the multi-
+ // channel stream. The least significant bit corresponds with the Front Left
+ // speaker, the next least significant bit corresponds to the Front Right
+ // speaker, and so on, continuing in the order defined in KsMedia.h.
+ // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083.aspx
+ // for more details.
+ ChannelConfig channel_config = format.dwChannelMask;
+
+ // Convert Microsoft's channel configuration to a generic ChannelLayout.
+ ChannelLayout channel_layout = ChannelConfigToChannelLayout(channel_config);
+
+ // Store preferred sample rate, bit depth and buffer size.
+ int sample_rate = format.Format.nSamplesPerSec;
+ int bits_per_sample = format.Format.wBitsPerSample;
+ int frames_per_buffer = static_cast<int>(
+ sample_rate * ReferenceTimeToSeconds(default_period) + 0.5);
+
+ DVLOG(2) << "channel_layout : " << channel_layout;
+ DVLOG(2) << "frames_per_buffer: " << frames_per_buffer;
+
+ AudioParameters audio_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ channel_layout,
+ sample_rate,
+ bits_per_sample,
+ frames_per_buffer);
+
+ *params = audio_params;
+ return hr;
+}
+
+HRESULT CoreAudioUtil::GetPreferredAudioParameters(
+ EDataFlow data_flow, ERole role, AudioParameters* params) {
+ DCHECK(CoreAudioUtil::IsSupported());
+
+ ScopedComPtr<IAudioClient> client = CreateDefaultClient(data_flow, role);
+ if (!client) {
+ // Map a NULL pointer to a new error code, which can differ from the
+ // actual error code. The exact value is not important here.
+ return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
+ }
+ return GetPreferredAudioParameters(client, params);
+}
+
+HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
+ const WAVEFORMATPCMEX* format,
+ HANDLE event_handle,
+ size_t* endpoint_buffer_size) {
+ DCHECK(CoreAudioUtil::IsSupported());
+
+ DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
+
+ // Enable event-driven streaming if a valid event handle is provided.
+ // After the stream starts, the audio engine will signal the event handle
+ // to notify the client each time a buffer becomes ready to process.
+ // Event-driven buffering is supported for both rendering and capturing.
+ // Both shared-mode and exclusive-mode streams can use event-driven buffering.
+ bool use_event = (event_handle != NULL &&
+ event_handle != INVALID_HANDLE_VALUE);
+ if (use_event)
+ stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+ DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;
+
+ // Initialize the shared mode client for minimal delay.
+ HRESULT hr = client->Initialize(AUDCLNT_SHAREMODE_SHARED,
+ stream_flags,
+ 0,
+ 0,
+ reinterpret_cast<const WAVEFORMATEX*>(format),
+ NULL);
+ if (FAILED(hr)) {
+ DVLOG(1) << "IAudioClient::Initialize: " << std::hex << hr;
+ return hr;
+ }
+
+ if (use_event) {
+ hr = client->SetEventHandle(event_handle);
+ if (FAILED(hr)) {
+ DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
+ return hr;
+ }
+ }
+
+ UINT32 buffer_size_in_frames = 0;
+ hr = client->GetBufferSize(&buffer_size_in_frames);
+ if (FAILED(hr)) {
+ DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
+ return hr;
+ }
+
+ *endpoint_buffer_size = static_cast<size_t>(buffer_size_in_frames);
+ DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
+
+ // TODO(henrika): utilize when delay measurements are added.
+ REFERENCE_TIME latency = 0;
+ hr = client->GetStreamLatency(&latency);
+ DVLOG(2) << "stream latency: "
+ << ReferenceTimeToMilliseconds(latency) << " [ms]";
+ return hr;
+}
+
+ScopedComPtr<IAudioRenderClient> CoreAudioUtil::CreateRenderClient(
+ IAudioClient* client) {
+ DCHECK(CoreAudioUtil::IsSupported());
+
+ // Get access to the IAudioRenderClient interface. This interface
+ // enables us to write output data to a rendering endpoint buffer.
+ ScopedComPtr<IAudioRenderClient> audio_render_client;
+ HRESULT hr = client->GetService(__uuidof(IAudioRenderClient),
+ audio_render_client.ReceiveVoid());
+ if (FAILED(hr)) {
+ DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
+ return ScopedComPtr<IAudioRenderClient>();
+ }
+
+ // TODO(henrika): verify that this scheme is the same for shared mode and
+ // exclusive mode streams.
+
+ // Avoid start-up glitches by filling up the endpoint buffer with "silence"
+ // before starting the stream.
+ UINT32 endpoint_buffer_size = 0;
+ hr = client->GetBufferSize(&endpoint_buffer_size);
+ DVLOG_IF(1, FAILED(hr)) << "IAudioClient::GetBufferSize: " << std::hex << hr;
+
+ BYTE* data = NULL;
+ hr = audio_render_client->GetBuffer(endpoint_buffer_size, &data);
+ DVLOG_IF(1, FAILED(hr)) << "IAudioRenderClient::GetBuffer: "
+ << std::hex << hr;
+ if (SUCCEEDED(hr)) {
+ // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
+ // explicitly write silence data to the rendering buffer.
+ hr = audio_render_client->ReleaseBuffer(endpoint_buffer_size,
+ AUDCLNT_BUFFERFLAGS_SILENT);
+ DVLOG_IF(1, FAILED(hr)) << "IAudioRenderClient::ReleaseBuffer: "
+ << std::hex << hr;
+ }
+
+ // Sanity check: verify that the endpoint buffer is filled with silence.
+ UINT32 num_queued_frames = 0;
+ client->GetCurrentPadding(&num_queued_frames);
+ DCHECK(num_queued_frames == endpoint_buffer_size);
+
+ return audio_render_client;
+}
+
+ScopedComPtr<IAudioCaptureClient> CoreAudioUtil::CreateCaptureClient(
+ IAudioClient* client) {
+ DCHECK(CoreAudioUtil::IsSupported());
+
+ // Get access to the IAudioCaptureClient interface. This interface
+ // enables us to read input data from a capturing endpoint buffer.
+ ScopedComPtr<IAudioCaptureClient> audio_capture_client;
+ HRESULT hr = client->GetService(__uuidof(IAudioCaptureClient),
+ audio_capture_client.ReceiveVoid());
+ if (FAILED(hr)) {
+ DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
+ return ScopedComPtr<IAudioCaptureClient>();
+ }
+ return audio_capture_client;
+}
+
} // namespace media
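A minimal end-to-end sketch, mirroring the unit tests in this change, of how the new helpers compose into a shared-mode render path; OpenDefaultRenderClient is an illustrative name and a COM-initialized thread is assumed:

    #include "media/audio/win/core_audio_util_win.h"

    namespace media {

    // Brings up a shared-mode render client using the audio engine's own
    // mix format. Returns false if any step fails.
    static bool OpenDefaultRenderClient(
        ScopedComPtr<IAudioRenderClient>* render_client,
        size_t* endpoint_buffer_size) {
      ScopedComPtr<IAudioClient> client =
          CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
      if (!client)
        return false;

      WAVEFORMATPCMEX format;
      if (FAILED(CoreAudioUtil::GetSharedModeMixFormat(client, &format)) ||
          !CoreAudioUtil::IsFormatSupported(client, AUDCLNT_SHAREMODE_SHARED,
                                            &format)) {
        return false;
      }

      // A NULL event handle selects push mode (no event-driven buffering).
      if (FAILED(CoreAudioUtil::SharedModeInitialize(
              client, &format, NULL, endpoint_buffer_size))) {
        return false;
      }

      *render_client = CoreAudioUtil::CreateRenderClient(client);
      return render_client->get() != NULL;
    }

    }  // namespace media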
diff --git a/media/audio/win/core_audio_util_win.h b/media/audio/win/core_audio_util_win.h
index 8247287..e5b30a3 100644
--- a/media/audio/win/core_audio_util_win.h
+++ b/media/audio/win/core_audio_util_win.h
@@ -5,6 +5,8 @@
// Utility methods for the Core Audio API on Windows.
// Always ensure that Core Audio is supported before using these methods.
// Use CoreAudioUtil::IsSupported() for this purpose.
+// Also, all methods must be called on a valid COM thread. This can be done
+// by using the base::win::ScopedCOMInitializer helper class.
#ifndef MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
#define MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
@@ -16,6 +18,7 @@
#include "base/basictypes.h"
#include "base/win/scoped_comptr.h"
#include "media/audio/audio_device_name.h"
+#include "media/audio/audio_parameters.h"
#include "media/base/media_export.h"
using base::win::ScopedComPtr;
@@ -27,11 +30,15 @@ class MEDIA_EXPORT CoreAudioUtil {
// Returns true if Windows Core Audio is supported.
static bool IsSupported();
+ // Returns AUDCLNT_SHAREMODE_EXCLUSIVE if --enable-exclusive-audio is used
+ // as a command-line flag and AUDCLNT_SHAREMODE_SHARED otherwise (default).
+ static AUDCLNT_SHAREMODE GetShareMode();
+
// The Windows Multimedia Device (MMDevice) API enables audio clients to
// discover audio endpoint devices and determine their capabilities.
// Number of active audio devices in the specified data-flow direction.
- // Set |data_flow| to eAll to retrive the total number of active audio
+ // Set |data_flow| to eAll to retrieve the total number of active audio
// devices.
static int NumberOfActiveDevices(EDataFlow data_flow);
@@ -39,12 +46,12 @@ class MEDIA_EXPORT CoreAudioUtil {
// enumerating audio endpoint devices.
static ScopedComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator();
- // Create a default endpoint device that is specified by a data-flow
+ // Creates a default endpoint device that is specified by a data-flow
// direction and role, e.g. default render device.
static ScopedComPtr<IMMDevice> CreateDefaultDevice(
EDataFlow data_flow, ERole role);
- // Create an endpoint device that is specified by a unique endpoint device-
+ // Creates an endpoint device that is specified by a unique endpoint device-
// identification string.
static ScopedComPtr<IMMDevice> CreateDevice(const std::string& device_id);
@@ -53,11 +60,11 @@ class MEDIA_EXPORT CoreAudioUtil {
// "Microphone (Realtek High Definition Audio)".
static HRESULT GetDeviceName(IMMDevice* device, AudioDeviceName* name);
- // Gets the user-friendly name of the endpoint devcice which is represented
- // by a uniqe id in |device_id|.
+ // Gets the user-friendly name of the endpoint device which is represented
+ // by a unique id in |device_id|.
static std::string GetFriendlyName(const std::string& device_id);
- // Returns true if the provided uniqe |device_id| correspinds to the current
+ // Returns true if the provided unique |device_id| corresponds to the current
// default device for the specified by a data-flow direction and role.
static bool DeviceIsDefault(
EDataFlow flow, ERole role, std::string device_id);
@@ -69,14 +76,78 @@ class MEDIA_EXPORT CoreAudioUtil {
// manage the flow of audio data between the application and an audio endpoint
// device.
- // Create an IAudioClient interface for an existing IMMDevice given by
- // |audio_device|. Flow direction and role is define by the |audio_device|.
+ // Create an IAudioClient interface for the default IMMDevice where
+ // flow direction and role are defined by |data_flow| and |role|.
// The IAudioClient interface enables a client to create and initialize an
// audio stream between an audio application and the audio engine (for a
// shared-mode stream) or the hardware buffer of an audio endpoint device
// (for an exclusive-mode stream).
+ static ScopedComPtr<IAudioClient> CreateDefaultClient(EDataFlow data_flow,
+ ERole role);
+
+ // Create an IAudioClient interface for an existing IMMDevice given by
+ // |audio_device|. Flow direction and role are defined by the |audio_device|.
static ScopedComPtr<IAudioClient> CreateClient(IMMDevice* audio_device);
+ // Get the mix format that the audio engine uses internally for processing
+ // of shared-mode streams. This format is not necessarily a format that the
+ // audio endpoint device supports. Thus, the caller might not succeed in
+ // creating an exclusive-mode stream with a format obtained by this method.
+ static HRESULT GetSharedModeMixFormat(IAudioClient* client,
+ WAVEFORMATPCMEX* format);
+
+ // Returns true if the specified |client| supports the format in |format|
+ // for the given |share_mode| (shared or exclusive).
+ static bool IsFormatSupported(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ const WAVEFORMATPCMEX* format);
+
+ // For a shared-mode stream, the audio engine periodically processes the
+ // data in the endpoint buffer at the period obtained in |device_period|.
+ // For an exclusive mode stream, |device_period| corresponds to the minimum
+ // time interval between successive processing by the endpoint device.
+ // This period plus the stream latency between the buffer and endpoint device
+ // represents the minimum possible latency that an audio application can
+ // achieve. The time in |device_period| is expressed in 100-nanosecond units.
+ static HRESULT GetDevicePeriod(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ REFERENCE_TIME* device_period);
+
+ // Get the preferred audio parameters for the specified |client|, or for the
+ // default device whose direction and role are given by |data_flow| and |role|.
+ // The acquired values should only be utilized for shared-mode streams since
+ // there are no preferred settings for an exclusive-mode stream.
+ static HRESULT GetPreferredAudioParameters(IAudioClient* client,
+ AudioParameters* params);
+ static HRESULT GetPreferredAudioParameters(EDataFlow data_flow, ERole role,
+ AudioParameters* params);
+
+ // After activating an IAudioClient interface on an audio endpoint device,
+ // the client must initialize it once, and only once, to initialize the audio
+ // stream between the client and the device. In shared mode, the client
+ // connects indirectly through the audio engine which does the mixing.
+ // In exclusive mode, the client connects directly to the audio hardware.
+ // If a valid event is provided in |event_handle|, the client will be
+ // initialized for event-driven buffer handling. If |event_handle| is set to
+ // NULL, event-driven buffer handling is not utilized.
+ static HRESULT SharedModeInitialize(IAudioClient* client,
+ const WAVEFORMATPCMEX* format,
+ HANDLE event_handle,
+ size_t* endpoint_buffer_size);
+ // TODO(henrika): add ExclusiveModeInitialize(...)
+
+ // Create an IAudioRenderClient client for an existing IAudioClient given by
+ // |client|. The IAudioRenderClient interface enables a client to write
+ // output data to a rendering endpoint buffer.
+ static ScopedComPtr<IAudioRenderClient> CreateRenderClient(
+ IAudioClient* client);
+
+ // Create an IAudioCaptureClient client for an existing IAudioClient given by
+ // |client|. The IAudioCaptureClient interface enables a client to read
+ // input data from a capture endpoint buffer.
+ static ScopedComPtr<IAudioCaptureClient> CreateCaptureClient(
+ IAudioClient* client);
+
private:
CoreAudioUtil() {}
~CoreAudioUtil() {}
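For intuition, a small worked sketch of the buffer-size arithmetic behind GetPreferredAudioParameters(): REFERENCE_TIME is expressed in 100-nanosecond units, so a typical shared-mode device period of 100000 equals 10 ms, which at 48000 Hz yields 480 frames. The helper name is illustrative:

    #include <Audioclient.h>

    // frames = sample_rate * period_in_seconds, rounded to nearest.
    static int FramesPerBufferFromPeriod(int sample_rate,
                                         REFERENCE_TIME device_period) {
      const double kRefTimeUnitsPerSecond = 10000000.0;  // 100-ns units.
      return static_cast<int>(
          sample_rate * device_period / kRefTimeUnitsPerSecond + 0.5);
    }
    // Example: FramesPerBufferFromPeriod(48000, 100000) == 480.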
diff --git a/media/audio/win/core_audio_util_win_unittest.cc b/media/audio/win/core_audio_util_win_unittest.cc
index 039c7bb..b1edf47 100644
--- a/media/audio/win/core_audio_util_win_unittest.cc
+++ b/media/audio/win/core_audio_util_win_unittest.cc
@@ -5,6 +5,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/waitable_event.h"
#include "base/win/scoped_com_initializer.h"
+#include "base/win/scoped_handle.h"
#include "media/audio/win/core_audio_util_win.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -179,18 +180,210 @@ TEST_F(CoreAudioUtilWinTest, DeviceIsDefault) {
EXPECT_FALSE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole, id));
}
+TEST_F(CoreAudioUtilWinTest, CreateDefaultClient) {
+ if (!CanRunAudioTest())
+ return;
+
+ EDataFlow data[] = {eRender, eCapture};
+
+ for (int i = 0; i < arraysize(data); ++i) {
+ ScopedComPtr<IAudioClient> client;
+ client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
+ EXPECT_TRUE(client);
+ }
+}
+
TEST_F(CoreAudioUtilWinTest, CreateClient) {
if (!CanRunAudioTest())
return;
EDataFlow data[] = {eRender, eCapture};
- ScopedComPtr<IMMDevice> device;
for (int i = 0; i < arraysize(data); ++i) {
+ ScopedComPtr<IMMDevice> device;
+ ScopedComPtr<IAudioClient> client;
device = CoreAudioUtil::CreateDefaultDevice(data[i], eConsole);
EXPECT_TRUE(device);
EXPECT_EQ(data[i], CoreAudioUtil::GetDataFlow(device));
+ client = CoreAudioUtil::CreateClient(device);
+ EXPECT_TRUE(client);
+ }
+}
+
+TEST_F(CoreAudioUtilWinTest, GetSharedModeMixFormat) {
+ if (!CanRunAudioTest())
+ return;
+
+ ScopedComPtr<IMMDevice> device;
+ ScopedComPtr<IAudioClient> client;
+ device = CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
+ EXPECT_TRUE(device);
+ client = CoreAudioUtil::CreateClient(device);
+ EXPECT_TRUE(client);
+
+ // Perform a simple sanity test of the acquired format structure.
+ WAVEFORMATPCMEX format;
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
+ &format)));
+ EXPECT_GE(format.Format.nChannels, 1);
+ EXPECT_GE(format.Format.nSamplesPerSec, 8000u);
+ EXPECT_GE(format.Format.wBitsPerSample, 16);
+ EXPECT_GE(format.Samples.wValidBitsPerSample, 16);
+ EXPECT_EQ(format.Format.wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+}
+
+TEST_F(CoreAudioUtilWinTest, GetDevicePeriod) {
+ if (!CanRunAudioTest())
+ return;
+
+ EDataFlow data[] = {eRender, eCapture};
+
+ // Verify that the device periods are valid for the default render and
+ // capture devices.
+ for (int i = 0; i < arraysize(data); ++i) {
+ ScopedComPtr<IAudioClient> client;
+ REFERENCE_TIME shared_time_period = 0;
+ REFERENCE_TIME exclusive_time_period = 0;
+ client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
+ EXPECT_TRUE(client);
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDevicePeriod(
+ client, AUDCLNT_SHAREMODE_SHARED, &shared_time_period)));
+ EXPECT_GT(shared_time_period, 0);
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDevicePeriod(
+ client, AUDCLNT_SHAREMODE_EXCLUSIVE, &exclusive_time_period)));
+ EXPECT_GT(exclusive_time_period, 0);
+ EXPECT_LE(exclusive_time_period, shared_time_period);
}
}
+TEST_F(CoreAudioUtilWinTest, GetPreferredAudioParameters) {
+ if (!CanRunAudioTest())
+ return;
+
+ EDataFlow data[] = {eRender, eCapture};
+
+ // Verify that the preferred audio parameters are OK for the default render
+ // and capture devices.
+ for (int i = 0; i < arraysize(data); ++i) {
+ ScopedComPtr<IAudioClient> client;
+ AudioParameters params;
+ client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
+ EXPECT_TRUE(client);
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(client,
+ &params)));
+ EXPECT_TRUE(params.IsValid());
+ }
+}
+
+TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
+ if (!CanRunAudioTest())
+ return;
+
+ ScopedComPtr<IAudioClient> client;
+ client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+ EXPECT_TRUE(client);
+
+ WAVEFORMATPCMEX format;
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
+ &format)));
+
+ // Perform a shared-mode initialization without event-driven buffer handling.
+ size_t endpoint_buffer_size = 0;
+ HRESULT hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ &endpoint_buffer_size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ // It is only possible to initialize a client once.
+ hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ &endpoint_buffer_size);
+ EXPECT_FALSE(SUCCEEDED(hr));
+ EXPECT_EQ(hr, AUDCLNT_E_ALREADY_INITIALIZED);
+
+ // Verify that it is possible to reinitialize the client after releasing it.
+ client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+ EXPECT_TRUE(client);
+ hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ &endpoint_buffer_size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_GT(endpoint_buffer_size, 0u);
+
+ // Use a non-supported format and verify that initialization fails.
+ // A simple way to emulate an invalid format is to use the shared-mode
+ // mixing format and modify the preferred sample rate.
+ client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+ EXPECT_TRUE(client);
+ format.Format.nSamplesPerSec = format.Format.nSamplesPerSec + 1;
+ EXPECT_FALSE(CoreAudioUtil::IsFormatSupported(
+ client, AUDCLNT_SHAREMODE_SHARED, &format));
+ hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ &endpoint_buffer_size);
+ EXPECT_TRUE(FAILED(hr));
+ EXPECT_EQ(hr, E_INVALIDARG);
+
+ // Finally, perform a shared-mode initialization using event-driven buffer
+ // handling. The event handle will be signaled when an audio buffer is ready
+ // to be processed by the client (not verified here).
+ // The event handle should be in the nonsignaled state.
+ base::win::ScopedHandle event_handle(::CreateEvent(NULL, TRUE, FALSE, NULL));
+ client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+ EXPECT_TRUE(client);
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
+ &format)));
+ EXPECT_TRUE(CoreAudioUtil::IsFormatSupported(
+ client, AUDCLNT_SHAREMODE_SHARED, &format));
+ hr = CoreAudioUtil::SharedModeInitialize(client, &format, event_handle.Get(),
+ &endpoint_buffer_size);
+ EXPECT_TRUE(SUCCEEDED(hr));
+ EXPECT_GT(endpoint_buffer_size, 0u);
+}
+
+TEST_F(CoreAudioUtilWinTest, CreateRenderAndCaptureClients) {
+ if (!CanRunAudioTest())
+ return;
+
+ EDataFlow data[] = {eRender, eCapture};
+
+ WAVEFORMATPCMEX format;
+ size_t endpoint_buffer_size = 0;
+
+ for (int i = 0; i < arraysize(data); ++i) {
+ ScopedComPtr<IAudioClient> client;
+ ScopedComPtr<IAudioRenderClient> render_client;
+ ScopedComPtr<IAudioCaptureClient> capture_client;
+
+ client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
+ EXPECT_TRUE(client);
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
+ &format)));
+ if (data[i] == eRender) {
+ // It is not possible to create a render client using an uninitialized
+ // client interface.
+ render_client = CoreAudioUtil::CreateRenderClient(client);
+ EXPECT_FALSE(render_client);
+
+ // Do a proper initialization and verify that it works this time.
+ CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ &endpoint_buffer_size);
+ render_client = CoreAudioUtil::CreateRenderClient(client);
+ EXPECT_TRUE(render_client);
+ EXPECT_GT(endpoint_buffer_size, 0u);
+ } else if (data[i] == eCapture) {
+ // It is not possible to create a capture client using an uninitialized
+ // client interface.
+ capture_client = CoreAudioUtil::CreateCaptureClient(client);
+ EXPECT_FALSE(capture_client);
+
+ // Do a proper initialization and verify that it works this time.
+ CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+ &endpoint_buffer_size);
+ capture_client = CoreAudioUtil::CreateCaptureClient(client);
+ EXPECT_TRUE(capture_client);
+ EXPECT_GT(endpoint_buffer_size, 0u);
+ }
+ }
+}
+
} // namespace media