author | henrika@chromium.org <henrika@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-10-05 11:52:55 +0000 |
---|---|---|
committer | henrika@chromium.org <henrika@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-10-05 11:52:55 +0000 |
commit | e75fe3ded56552f45e19c058e03794bc488429f6 (patch) | |
tree | d6e33d1d9909403d0a0a08e8bbf68f78af4604cb /media | |
parent | de2d4ae90ca20fdc48ca70a0ab6de74076503d2d (diff) | |
Add WASAPI-based unified audio I/O back-end for Windows 7.
Implementation of AudioOutputStream for Windows using WASAPI.
Both the input and output devices must use the same sample rate.
The corresponding implementation for Mac OS X is given by http://codereview.chromium.org/10916105/.
Additional testing using full Chrome:
http://webaudiodemos.appspot.com/input/index.html
Local tests have been performed using a wide range of audio devices and sample rates. It has also been verified that QoS is good between two different devices, as long as they both run at the same sample rate.
A 12-hour test of http://webaudiodemos.appspot.com/input/index.html worked well.
Verified that we log the warning "Unified audio I/O is not supported." if the user has selected different sample rates for input and output.
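For reference, a minimal sketch (editorial illustration only, built on the static helpers this patch adds to WASAPIUnifiedStream) of how a caller can test the same precondition before requesting a unified stream:

  // HasUnifiedDefaultIO() returns true only when the default render and
  // capture devices report the same preferred buffer size and channel
  // count; when it returns false, the unified back-end is skipped and
  // the warning above is logged instead.
  if (!media::WASAPIUnifiedStream::HasUnifiedDefaultIO()) {
    LOG(WARNING) << "Unified audio I/O is not supported.";
  }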
BUG=145092
TEST=media_unittests.exe --gtest_filter=WASAPIUnified* --enable-webaudio-input
Review URL: https://codereview.chromium.org/10959068
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@160346 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r-- | media/audio/audio_util.cc | 10 | ||||
-rw-r--r-- | media/audio/win/audio_manager_win.cc | 687 | ||||
-rw-r--r-- | media/audio/win/audio_unified_win.cc | 708 | ||||
-rw-r--r-- | media/audio/win/audio_unified_win.h | 192 | ||||
-rw-r--r-- | media/audio/win/audio_unified_win_unittest.cc | 280 | ||||
-rw-r--r-- | media/media.gyp | 3 |
6 files changed, 1540 insertions, 340 deletions
diff --git a/media/audio/audio_util.cc b/media/audio/audio_util.cc index 6307563..e14c036 100644 --- a/media/audio/audio_util.cc +++ b/media/audio/audio_util.cc @@ -32,6 +32,7 @@ #include "media/audio/audio_manager_base.h" #include "media/audio/win/audio_low_latency_input_win.h" #include "media/audio/win/audio_low_latency_output_win.h" +#include "media/audio/win/audio_unified_win.h" #include "media/base/limits.h" #include "media/base/media_switches.h" #endif @@ -323,6 +324,15 @@ size_t GetAudioHardwareBufferSize() { return 256; } + // TODO(henrika): remove when HardwareBufferSize() has been tested well + // enough to be moved from WASAPIUnifiedStream to WASAPIAudioOutputStream. + if (cmd_line->HasSwitch(switches::kEnableWebAudioInput)) { + int buffer_size = WASAPIUnifiedStream::HardwareBufferSize(eRender); + // |buffer_size| can be zero if we use e.g. remote desktop or if all + // audio devices are disabled. + return (buffer_size > 0) ? buffer_size : kFallbackBufferSize; + } + // This call must be done on a COM thread configured as MTA. // TODO(tommi): http://code.google.com/p/chromium/issues/detail?id=103835. int mixing_sample_rate = diff --git a/media/audio/win/audio_manager_win.cc b/media/audio/win/audio_manager_win.cc index 46a387c..9622e3f 100644 --- a/media/audio/win/audio_manager_win.cc +++ b/media/audio/win/audio_manager_win.cc @@ -1,340 +1,347 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "media/audio/audio_io.h" - -#include <windows.h> -#include <objbase.h> // This has to be before initguid.h -#include <initguid.h> -#include <mmsystem.h> -#include <setupapi.h> - -#include "base/basictypes.h" -#include "base/command_line.h" -#include "base/file_path.h" -#include "base/memory/scoped_ptr.h" -#include "base/path_service.h" -#include "base/process_util.h" -#include "base/string_number_conversions.h" -#include "base/string_util.h" -#include "media/audio/audio_util.h" -#include "media/audio/win/audio_low_latency_input_win.h" -#include "media/audio/win/audio_low_latency_output_win.h" -#include "media/audio/win/audio_manager_win.h" -#include "media/audio/win/device_enumeration_win.h" -#include "media/audio/win/wavein_input_win.h" -#include "media/audio/win/waveout_output_win.h" -#include "media/base/limits.h" - -// Libraries required for the SetupAPI and Wbem APIs used here. -#pragma comment(lib, "setupapi.lib") - -// The following are defined in various DDK headers, and we (re)define them -// here to avoid adding the DDK as a chrome dependency. -#define DRV_QUERYDEVICEINTERFACE 0x80c -#define DRVM_MAPPER_PREFERRED_GET 0x2015 -#define DRV_QUERYDEVICEINTERFACESIZE 0x80d -DEFINE_GUID(AM_KSCATEGORY_AUDIO, 0x6994ad04, 0x93ef, 0x11d0, - 0xa3, 0xcc, 0x00, 0xa0, 0xc9, 0x22, 0x31, 0x96); - -namespace media { - -// Maximum number of output streams that can be open simultaneously. -static const int kMaxOutputStreams = 50; - -// Up to 8 channels can be passed to the driver. -// This should work, given the right drivers, but graceful error handling is -// needed. -static const int kWinMaxChannels = 8; - -// We use 3 buffers for recording audio so that if a recording callback takes -// some time to return we won't lose audio. More buffers while recording are -// ok because they don't introduce any delay in recording, unlike in playback -// where you first need to fill in that number of buffers before starting to -// play. 
-static const int kNumInputBuffers = 3; - -static int GetVersionPartAsInt(DWORDLONG num) { - return static_cast<int>(num & 0xffff); -} - -// Returns a string containing the given device's description and installed -// driver version. -static string16 GetDeviceAndDriverInfo(HDEVINFO device_info, - SP_DEVINFO_DATA* device_data) { - // Save the old install params setting and set a flag for the - // SetupDiBuildDriverInfoList below to return only the installed drivers. - SP_DEVINSTALL_PARAMS old_device_install_params; - old_device_install_params.cbSize = sizeof(old_device_install_params); - SetupDiGetDeviceInstallParams(device_info, device_data, - &old_device_install_params); - SP_DEVINSTALL_PARAMS device_install_params = old_device_install_params; - device_install_params.FlagsEx |= DI_FLAGSEX_INSTALLEDDRIVER; - SetupDiSetDeviceInstallParams(device_info, device_data, - &device_install_params); - - SP_DRVINFO_DATA driver_data; - driver_data.cbSize = sizeof(driver_data); - string16 device_and_driver_info; - if (SetupDiBuildDriverInfoList(device_info, device_data, - SPDIT_COMPATDRIVER)) { - if (SetupDiEnumDriverInfo(device_info, device_data, SPDIT_COMPATDRIVER, 0, - &driver_data)) { - DWORDLONG version = driver_data.DriverVersion; - device_and_driver_info = string16(driver_data.Description) + L" v" + - base::IntToString16(GetVersionPartAsInt((version >> 48))) + L"." + - base::IntToString16(GetVersionPartAsInt((version >> 32))) + L"." + - base::IntToString16(GetVersionPartAsInt((version >> 16))) + L"." + - base::IntToString16(GetVersionPartAsInt(version)); - } - SetupDiDestroyDriverInfoList(device_info, device_data, SPDIT_COMPATDRIVER); - } - - SetupDiSetDeviceInstallParams(device_info, device_data, - &old_device_install_params); - - return device_and_driver_info; -} - -AudioManagerWin::AudioManagerWin() { - if (!media::IsWASAPISupported()) { - // Use the Wave API for device enumeration if XP or lower. - enumeration_type_ = kWaveEnumeration; - } else { - // Use the MMDevice API for device enumeration if Vista or higher. - enumeration_type_ = kMMDeviceEnumeration; - } - - SetMaxOutputStreamsAllowed(kMaxOutputStreams); -} - -AudioManagerWin::~AudioManagerWin() { - Shutdown(); -} - -bool AudioManagerWin::HasAudioOutputDevices() { - return (::waveOutGetNumDevs() != 0); -} - -bool AudioManagerWin::HasAudioInputDevices() { - return (::waveInGetNumDevs() != 0); -} - -string16 AudioManagerWin::GetAudioInputDeviceModel() { - // Get the default audio capture device and its device interface name. - DWORD device_id = 0; - waveInMessage(reinterpret_cast<HWAVEIN>(WAVE_MAPPER), - DRVM_MAPPER_PREFERRED_GET, - reinterpret_cast<DWORD_PTR>(&device_id), NULL); - ULONG device_interface_name_size = 0; - waveInMessage(reinterpret_cast<HWAVEIN>(device_id), - DRV_QUERYDEVICEINTERFACESIZE, - reinterpret_cast<DWORD_PTR>(&device_interface_name_size), 0); - size_t bytes_in_char16 = sizeof(string16::value_type); - DCHECK_EQ(0u, device_interface_name_size % bytes_in_char16); - if (device_interface_name_size <= bytes_in_char16) - return string16(); // No audio capture device. - - string16 device_interface_name; - string16::value_type* name_ptr = WriteInto(&device_interface_name, - device_interface_name_size / bytes_in_char16); - waveInMessage(reinterpret_cast<HWAVEIN>(device_id), - DRV_QUERYDEVICEINTERFACE, - reinterpret_cast<DWORD_PTR>(name_ptr), - static_cast<DWORD_PTR>(device_interface_name_size)); - - // Enumerate all audio devices and find the one matching the above device - // interface name. 
- HDEVINFO device_info = SetupDiGetClassDevs( - &AM_KSCATEGORY_AUDIO, 0, 0, DIGCF_DEVICEINTERFACE | DIGCF_PRESENT); - if (device_info == INVALID_HANDLE_VALUE) - return string16(); - - DWORD interface_index = 0; - SP_DEVICE_INTERFACE_DATA interface_data; - interface_data.cbSize = sizeof(interface_data); - while (SetupDiEnumDeviceInterfaces(device_info, 0, &AM_KSCATEGORY_AUDIO, - interface_index++, &interface_data)) { - // Query the size of the struct, allocate it and then query the data. - SP_DEVINFO_DATA device_data; - device_data.cbSize = sizeof(device_data); - DWORD interface_detail_size = 0; - SetupDiGetDeviceInterfaceDetail(device_info, &interface_data, 0, 0, - &interface_detail_size, &device_data); - if (!interface_detail_size) - continue; - - scoped_array<char> interface_detail_buffer(new char[interface_detail_size]); - SP_DEVICE_INTERFACE_DETAIL_DATA* interface_detail = - reinterpret_cast<SP_DEVICE_INTERFACE_DETAIL_DATA*>( - interface_detail_buffer.get()); - interface_detail->cbSize = interface_detail_size; - if (!SetupDiGetDeviceInterfaceDetail(device_info, &interface_data, - interface_detail, - interface_detail_size, NULL, - &device_data)) - return string16(); - - bool device_found = (device_interface_name == interface_detail->DevicePath); - - if (device_found) - return GetDeviceAndDriverInfo(device_info, &device_data); - } - - return string16(); -} - -bool AudioManagerWin::CanShowAudioInputSettings() { - return true; -} - -void AudioManagerWin::ShowAudioInputSettings() { - std::wstring program; - std::string argument; - if (!media::IsWASAPISupported()) { - program = L"sndvol32.exe"; - argument = "-R"; - } else { - program = L"control.exe"; - argument = "mmsys.cpl,,1"; - } - - FilePath path; - PathService::Get(base::DIR_SYSTEM, &path); - path = path.Append(program); - CommandLine command_line(path); - command_line.AppendArg(argument); - base::LaunchProcess(command_line, base::LaunchOptions(), NULL); -} - -void AudioManagerWin::GetAudioInputDeviceNames( - media::AudioDeviceNames* device_names) { - DCHECK(enumeration_type() != kUninitializedEnumeration); - // Enumerate all active audio-endpoint capture devices. - if (enumeration_type() == kWaveEnumeration) { - // Utilize the Wave API for Windows XP. - media::GetInputDeviceNamesWinXP(device_names); - } else { - // Utilize the MMDevice API (part of Core Audio) for Vista and higher. - media::GetInputDeviceNamesWin(device_names); - } - - // Always add default device parameters as first element. - if (!device_names->empty()) { - media::AudioDeviceName name; - name.device_name = AudioManagerBase::kDefaultDeviceName; - name.unique_id = AudioManagerBase::kDefaultDeviceId; - device_names->push_front(name); - } -} - -// Factory for the implementations of AudioOutputStream for AUDIO_PCM_LINEAR -// mode. -// - PCMWaveOutAudioOutputStream: Based on the waveOut API. -AudioOutputStream* AudioManagerWin::MakeLinearOutputStream( - const AudioParameters& params) { - DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format()); - if (params.channels() > kWinMaxChannels) - return NULL; - - return new PCMWaveOutAudioOutputStream(this, - params, - media::NumberOfWaveOutBuffers(), - WAVE_MAPPER); -} - -// Factory for the implementations of AudioOutputStream for -// AUDIO_PCM_LOW_LATENCY mode. Two implementations should suffice most -// windows user's needs. -// - PCMWaveOutAudioOutputStream: Based on the waveOut API. -// - WASAPIAudioOutputStream: Based on Core Audio (WASAPI) API. 
-AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream( - const AudioParameters& params) { - DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format()); - if (params.channels() > kWinMaxChannels) - return NULL; - - AudioOutputStream* stream = NULL; - if (!media::IsWASAPISupported()) { - // Fall back to Windows Wave implementation on Windows XP or lower. - DVLOG(1) << "Using WaveOut since WASAPI requires at least Vista."; - stream = new PCMWaveOutAudioOutputStream(this, params, 2, WAVE_MAPPER); - } else { - // TODO(henrika): improve possibility to specify audio endpoint. - // Use the default device (same as for Wave) for now to be compatible. - stream = new WASAPIAudioOutputStream(this, params, eConsole); - } - - return stream; -} - -// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR -// mode. -AudioInputStream* AudioManagerWin::MakeLinearInputStream( - const AudioParameters& params, const std::string& device_id) { - DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format()); - return CreatePCMWaveInAudioInputStream(params, device_id); -} - -// Factory for the implementations of AudioInputStream for -// AUDIO_PCM_LOW_LATENCY mode. -AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream( - const AudioParameters& params, const std::string& device_id) { - DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format()); - AudioInputStream* stream = NULL; - if (!media::IsWASAPISupported()) { - // Fall back to Windows Wave implementation on Windows XP or lower. - DVLOG(1) << "Using WaveIn since WASAPI requires at least Vista."; - stream = CreatePCMWaveInAudioInputStream(params, device_id); - } else { - stream = new WASAPIAudioInputStream(this, params, device_id); - } - - return stream; -} - -AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream( - const AudioParameters& params, - const std::string& device_id) { - std::string xp_device_id = device_id; - if (device_id != AudioManagerBase::kDefaultDeviceId && - enumeration_type_ == kMMDeviceEnumeration) { - xp_device_id = media::ConvertToWinXPDeviceId(device_id); - if (xp_device_id.empty()) { - DLOG(ERROR) << "Cannot find a waveIn device which matches the device ID " - << device_id; - return NULL; - } - } - - return new PCMWaveInAudioInputStream(this, params, kNumInputBuffers, - xp_device_id); -} - -/// static -AudioManager* CreateAudioManager() { - return new AudioManagerWin(); -} - -AudioParameters AudioManagerWin::GetPreferredLowLatencyOutputStreamParameters( - const AudioParameters& input_params) { - // If WASAPI isn't supported we'll fallback to WaveOut, which will take care - // of resampling and bits per sample changes. By setting these equal to the - // input values, AudioOutputResampler will skip resampling and bit per sample - // differences (since the input parameters will match the output parameters). - int sample_rate = input_params.sample_rate(); - int bits_per_sample = input_params.bits_per_sample(); - if (IsWASAPISupported()) { - sample_rate = GetAudioHardwareSampleRate(); - bits_per_sample = 16; - } - - // TODO(dalecurtis): This should include bits per channel and channel layout - // eventually. - return AudioParameters( - AudioParameters::AUDIO_PCM_LOW_LATENCY, input_params.channel_layout(), - sample_rate, bits_per_sample, GetAudioHardwareBufferSize()); -} - -} // namespace media +// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_io.h"
+
+#include <windows.h>
+#include <objbase.h> // This has to be before initguid.h
+#include <initguid.h>
+#include <mmsystem.h>
+#include <setupapi.h>
+
+#include "base/basictypes.h"
+#include "base/command_line.h"
+#include "base/file_path.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/path_service.h"
+#include "base/process_util.h"
+#include "base/string_number_conversions.h"
+#include "base/string_util.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/win/audio_low_latency_input_win.h"
+#include "media/audio/win/audio_low_latency_output_win.h"
+#include "media/audio/win/audio_manager_win.h"
+#include "media/audio/win/audio_unified_win.h"
+#include "media/audio/win/device_enumeration_win.h"
+#include "media/audio/win/wavein_input_win.h"
+#include "media/audio/win/waveout_output_win.h"
+#include "media/base/limits.h"
+#include "media/base/media_switches.h"
+
+// Libraries required for the SetupAPI and Wbem APIs used here.
+#pragma comment(lib, "setupapi.lib")
+
+// The following are defined in various DDK headers, and we (re)define them
+// here to avoid adding the DDK as a chrome dependency.
+#define DRV_QUERYDEVICEINTERFACE 0x80c
+#define DRVM_MAPPER_PREFERRED_GET 0x2015
+#define DRV_QUERYDEVICEINTERFACESIZE 0x80d
+DEFINE_GUID(AM_KSCATEGORY_AUDIO, 0x6994ad04, 0x93ef, 0x11d0,
+ 0xa3, 0xcc, 0x00, 0xa0, 0xc9, 0x22, 0x31, 0x96);
+
+namespace media {
+
+// Maximum number of output streams that can be open simultaneously.
+static const int kMaxOutputStreams = 50;
+
+// Up to 8 channels can be passed to the driver.
+// This should work, given the right drivers, but graceful error handling is
+// needed.
+static const int kWinMaxChannels = 8;
+
+// We use 3 buffers for recording audio so that if a recording callback takes
+// some time to return we won't lose audio. More buffers while recording are
+// ok because they don't introduce any delay in recording, unlike in playback
+// where you first need to fill in that number of buffers before starting to
+// play.
+static const int kNumInputBuffers = 3;
+
+static int GetVersionPartAsInt(DWORDLONG num) {
+ return static_cast<int>(num & 0xffff);
+}
+
+// Returns a string containing the given device's description and installed
+// driver version.
+static string16 GetDeviceAndDriverInfo(HDEVINFO device_info,
+ SP_DEVINFO_DATA* device_data) {
+ // Save the old install params setting and set a flag for the
+ // SetupDiBuildDriverInfoList below to return only the installed drivers.
+ SP_DEVINSTALL_PARAMS old_device_install_params;
+ old_device_install_params.cbSize = sizeof(old_device_install_params);
+ SetupDiGetDeviceInstallParams(device_info, device_data,
+ &old_device_install_params);
+ SP_DEVINSTALL_PARAMS device_install_params = old_device_install_params;
+ device_install_params.FlagsEx |= DI_FLAGSEX_INSTALLEDDRIVER;
+ SetupDiSetDeviceInstallParams(device_info, device_data,
+ &device_install_params);
+
+ SP_DRVINFO_DATA driver_data;
+ driver_data.cbSize = sizeof(driver_data);
+ string16 device_and_driver_info;
+ if (SetupDiBuildDriverInfoList(device_info, device_data,
+ SPDIT_COMPATDRIVER)) {
+ if (SetupDiEnumDriverInfo(device_info, device_data, SPDIT_COMPATDRIVER, 0,
+ &driver_data)) {
+ DWORDLONG version = driver_data.DriverVersion;
+ device_and_driver_info = string16(driver_data.Description) + L" v" +
+ base::IntToString16(GetVersionPartAsInt((version >> 48))) + L"." +
+ base::IntToString16(GetVersionPartAsInt((version >> 32))) + L"." +
+ base::IntToString16(GetVersionPartAsInt((version >> 16))) + L"." +
+ base::IntToString16(GetVersionPartAsInt(version));
+ }
+ SetupDiDestroyDriverInfoList(device_info, device_data, SPDIT_COMPATDRIVER);
+ }
+
+ SetupDiSetDeviceInstallParams(device_info, device_data,
+ &old_device_install_params);
+
+ return device_and_driver_info;
+}
+
+AudioManagerWin::AudioManagerWin() {
+ if (!media::IsWASAPISupported()) {
+ // Use the Wave API for device enumeration if XP or lower.
+ enumeration_type_ = kWaveEnumeration;
+ } else {
+ // Use the MMDevice API for device enumeration if Vista or higher.
+ enumeration_type_ = kMMDeviceEnumeration;
+ }
+
+ SetMaxOutputStreamsAllowed(kMaxOutputStreams);
+}
+
+AudioManagerWin::~AudioManagerWin() {
+ Shutdown();
+}
+
+bool AudioManagerWin::HasAudioOutputDevices() {
+ return (::waveOutGetNumDevs() != 0);
+}
+
+bool AudioManagerWin::HasAudioInputDevices() {
+ return (::waveInGetNumDevs() != 0);
+}
+
+string16 AudioManagerWin::GetAudioInputDeviceModel() {
+ // Get the default audio capture device and its device interface name.
+ DWORD device_id = 0;
+ waveInMessage(reinterpret_cast<HWAVEIN>(WAVE_MAPPER),
+ DRVM_MAPPER_PREFERRED_GET,
+ reinterpret_cast<DWORD_PTR>(&device_id), NULL);
+ ULONG device_interface_name_size = 0;
+ waveInMessage(reinterpret_cast<HWAVEIN>(device_id),
+ DRV_QUERYDEVICEINTERFACESIZE,
+ reinterpret_cast<DWORD_PTR>(&device_interface_name_size), 0);
+ size_t bytes_in_char16 = sizeof(string16::value_type);
+ DCHECK_EQ(0u, device_interface_name_size % bytes_in_char16);
+ if (device_interface_name_size <= bytes_in_char16)
+ return string16(); // No audio capture device.
+
+ string16 device_interface_name;
+ string16::value_type* name_ptr = WriteInto(&device_interface_name,
+ device_interface_name_size / bytes_in_char16);
+ waveInMessage(reinterpret_cast<HWAVEIN>(device_id),
+ DRV_QUERYDEVICEINTERFACE,
+ reinterpret_cast<DWORD_PTR>(name_ptr),
+ static_cast<DWORD_PTR>(device_interface_name_size));
+
+ // Enumerate all audio devices and find the one matching the above device
+ // interface name.
+ HDEVINFO device_info = SetupDiGetClassDevs(
+ &AM_KSCATEGORY_AUDIO, 0, 0, DIGCF_DEVICEINTERFACE | DIGCF_PRESENT);
+ if (device_info == INVALID_HANDLE_VALUE)
+ return string16();
+
+ DWORD interface_index = 0;
+ SP_DEVICE_INTERFACE_DATA interface_data;
+ interface_data.cbSize = sizeof(interface_data);
+ while (SetupDiEnumDeviceInterfaces(device_info, 0, &AM_KSCATEGORY_AUDIO,
+ interface_index++, &interface_data)) {
+ // Query the size of the struct, allocate it and then query the data.
+ SP_DEVINFO_DATA device_data;
+ device_data.cbSize = sizeof(device_data);
+ DWORD interface_detail_size = 0;
+ SetupDiGetDeviceInterfaceDetail(device_info, &interface_data, 0, 0,
+ &interface_detail_size, &device_data);
+ if (!interface_detail_size)
+ continue;
+
+ scoped_array<char> interface_detail_buffer(new char[interface_detail_size]);
+ SP_DEVICE_INTERFACE_DETAIL_DATA* interface_detail =
+ reinterpret_cast<SP_DEVICE_INTERFACE_DETAIL_DATA*>(
+ interface_detail_buffer.get());
+ interface_detail->cbSize = interface_detail_size;
+ if (!SetupDiGetDeviceInterfaceDetail(device_info, &interface_data,
+ interface_detail,
+ interface_detail_size, NULL,
+ &device_data))
+ return string16();
+
+ bool device_found = (device_interface_name == interface_detail->DevicePath);
+
+ if (device_found)
+ return GetDeviceAndDriverInfo(device_info, &device_data);
+ }
+
+ return string16();
+}
+
+bool AudioManagerWin::CanShowAudioInputSettings() {
+ return true;
+}
+
+void AudioManagerWin::ShowAudioInputSettings() {
+ std::wstring program;
+ std::string argument;
+ if (!media::IsWASAPISupported()) {
+ program = L"sndvol32.exe";
+ argument = "-R";
+ } else {
+ program = L"control.exe";
+ argument = "mmsys.cpl,,1";
+ }
+
+ FilePath path;
+ PathService::Get(base::DIR_SYSTEM, &path);
+ path = path.Append(program);
+ CommandLine command_line(path);
+ command_line.AppendArg(argument);
+ base::LaunchProcess(command_line, base::LaunchOptions(), NULL);
+}
+
+void AudioManagerWin::GetAudioInputDeviceNames(
+ media::AudioDeviceNames* device_names) {
+ DCHECK(enumeration_type() != kUninitializedEnumeration);
+ // Enumerate all active audio-endpoint capture devices.
+ if (enumeration_type() == kWaveEnumeration) {
+ // Utilize the Wave API for Windows XP.
+ media::GetInputDeviceNamesWinXP(device_names);
+ } else {
+ // Utilize the MMDevice API (part of Core Audio) for Vista and higher.
+ media::GetInputDeviceNamesWin(device_names);
+ }
+
+ // Always add default device parameters as first element.
+ if (!device_names->empty()) {
+ media::AudioDeviceName name;
+ name.device_name = AudioManagerBase::kDefaultDeviceName;
+ name.unique_id = AudioManagerBase::kDefaultDeviceId;
+ device_names->push_front(name);
+ }
+}
+
+// Factory for the implementations of AudioOutputStream for AUDIO_PCM_LINEAR
+// mode.
+// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
+AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
+ const AudioParameters& params) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ if (params.channels() > kWinMaxChannels)
+ return NULL;
+
+ return new PCMWaveOutAudioOutputStream(this,
+ params,
+ media::NumberOfWaveOutBuffers(),
+ WAVE_MAPPER);
+}
+
+// Factory for the implementations of AudioOutputStream for
+// AUDIO_PCM_LOW_LATENCY mode. Two implementations should suffice for most
+// Windows users' needs.
+// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
+// - WASAPIAudioOutputStream: Based on Core Audio (WASAPI) API.
+AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
+ const AudioParameters& params) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ if (params.channels() > kWinMaxChannels)
+ return NULL;
+
+ if (!media::IsWASAPISupported()) {
+ // Fall back to Windows Wave implementation on Windows XP or lower.
+ DVLOG(1) << "Using WaveOut since WASAPI requires at least Vista.";
+ return new PCMWaveOutAudioOutputStream(this, params, 2, WAVE_MAPPER);
+ }
+
+ // TODO(henrika): remove once we properly handle input device selection.
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableWebAudioInput)) {
+ if (WASAPIUnifiedStream::HasUnifiedDefaultIO()) {
+ DVLOG(1) << "WASAPIUnifiedStream is created.";
+ return new WASAPIUnifiedStream(this, params);
+ }
+ LOG(WARNING) << "Unified audio I/O is not supported.";
+ }
+
+ return new WASAPIAudioOutputStream(this, params, eConsole);
+}
+
+// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR
+// mode.
+AudioInputStream* AudioManagerWin::MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ return CreatePCMWaveInAudioInputStream(params, device_id);
+}
+
+// Factory for the implementations of AudioInputStream for
+// AUDIO_PCM_LOW_LATENCY mode.
+AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ AudioInputStream* stream = NULL;
+ if (!media::IsWASAPISupported()) {
+ // Fall back to Windows Wave implementation on Windows XP or lower.
+ DVLOG(1) << "Using WaveIn since WASAPI requires at least Vista.";
+ stream = CreatePCMWaveInAudioInputStream(params, device_id);
+ } else {
+ stream = new WASAPIAudioInputStream(this, params, device_id);
+ }
+
+ return stream;
+}
+
+AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream(
+ const AudioParameters& params,
+ const std::string& device_id) {
+ std::string xp_device_id = device_id;
+ if (device_id != AudioManagerBase::kDefaultDeviceId &&
+ enumeration_type_ == kMMDeviceEnumeration) {
+ xp_device_id = media::ConvertToWinXPDeviceId(device_id);
+ if (xp_device_id.empty()) {
+ DLOG(ERROR) << "Cannot find a waveIn device which matches the device ID "
+ << device_id;
+ return NULL;
+ }
+ }
+
+ return new PCMWaveInAudioInputStream(this, params, kNumInputBuffers,
+ xp_device_id);
+}
+
+// static
+AudioManager* CreateAudioManager() {
+ return new AudioManagerWin();
+}
+
+AudioParameters AudioManagerWin::GetPreferredLowLatencyOutputStreamParameters(
+ const AudioParameters& input_params) {
+ // If WASAPI isn't supported we'll fall back to WaveOut, which will take
+ // care of resampling and bits-per-sample changes. By setting these equal
+ // to the input values, AudioOutputResampler will skip resampling and
+ // bits-per-sample conversion (since the input parameters will match the
+ // output parameters).
+ int sample_rate = input_params.sample_rate();
+ int bits_per_sample = input_params.bits_per_sample();
+ if (IsWASAPISupported()) {
+ sample_rate = GetAudioHardwareSampleRate();
+ bits_per_sample = 16;
+ }
+
+ // TODO(dalecurtis): This should include bits per channel and channel layout
+ // eventually.
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, input_params.channel_layout(),
+ sample_rate, bits_per_sample, GetAudioHardwareBufferSize());
+}
+
+} // namespace media
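A worked example of the preferred-parameter logic above (illustrative numbers, not part of the patch): on a Vista+ machine whose shared-mode engine mixes at 48000 Hz with a 10 ms device period, a 44100 Hz / 16-bit stereo input is rewritten as follows:

  // Hypothetical renderer-side input parameters.
  media::AudioParameters input(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_STEREO, 44100, 16, 441);
  // WASAPI is supported, so sample_rate becomes the hardware mixing rate
  // (48000) and bits_per_sample is forced to 16. With the
  // --enable-webaudio-input switch, GetAudioHardwareBufferSize() returns
  // WASAPIUnifiedStream::HardwareBufferSize(eRender), i.e.
  // 48000 * 100000 / 10000000 = 480 frames.
  // Result: AUDIO_PCM_LOW_LATENCY, stereo, 48000 Hz, 16 bits, 480 frames.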
diff --git a/media/audio/win/audio_unified_win.cc b/media/audio/win/audio_unified_win.cc new file mode 100644 index 0000000..aa49f88 --- /dev/null +++ b/media/audio/win/audio_unified_win.cc @@ -0,0 +1,708 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "media/audio/win/audio_unified_win.h" + +#include <Functiondiscoverykeys_devpkey.h> + +#include "base/debug/trace_event.h" +#include "base/win/scoped_com_initializer.h" +#include "media/audio/win/audio_manager_win.h" +#include "media/audio/win/avrt_wrapper_win.h" + +using base::win::ScopedComPtr; +using base::win::ScopedCOMInitializer; +using base::win::ScopedCoMem; + +namespace media { + +static HRESULT GetMixFormat(EDataFlow data_flow, WAVEFORMATEX** device_format) { + // It is assumed that this static method is called from a COM thread, i.e., + // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. + ScopedComPtr<IMMDeviceEnumerator> enumerator; + HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), + NULL, + CLSCTX_INPROC_SERVER, + __uuidof(IMMDeviceEnumerator), + enumerator.ReceiveVoid()); + if (FAILED(hr)) { + NOTREACHED() << "error code: " << std::hex << hr; + return hr; + } + + ScopedComPtr<IMMDevice> endpoint_device; + hr = enumerator->GetDefaultAudioEndpoint(data_flow, + eConsole, + endpoint_device.Receive()); + if (FAILED(hr)) { + LOG(WARNING) << "No audio end point: " << std::hex << hr; + return hr; + } + + ScopedComPtr<IAudioClient> audio_client; + hr = endpoint_device->Activate(__uuidof(IAudioClient), + CLSCTX_INPROC_SERVER, + NULL, + audio_client.ReceiveVoid()); + DCHECK(SUCCEEDED(hr)) << "Failed to activate device: " << std::hex << hr; + if (SUCCEEDED(hr)) { + // Retrieve the stream format that the audio engine uses for its internal + // processing/mixing of shared-mode streams. + hr = audio_client->GetMixFormat(device_format); + DCHECK(SUCCEEDED(hr)) << "GetMixFormat: " << std::hex << hr; + } + + return hr; +} + +static ScopedComPtr<IMMDevice> CreateDefaultAudioDevice(EDataFlow data_flow) { + ScopedComPtr<IMMDeviceEnumerator> device_enumerator; + ScopedComPtr<IMMDevice> endpoint_device; + + // Create the IMMDeviceEnumerator interface. + HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), + NULL, + CLSCTX_INPROC_SERVER, + __uuidof(IMMDeviceEnumerator), + device_enumerator.ReceiveVoid()); + if (SUCCEEDED(hr)) { + // Retrieve the default render audio endpoint for the specified role. + hr = device_enumerator->GetDefaultAudioEndpoint( + data_flow, eConsole, endpoint_device.Receive()); + + if (FAILED(hr)) { + PLOG(ERROR) << "GetDefaultAudioEndpoint: " << std::hex << hr; + return endpoint_device; + } + + // Verify that the audio endpoint device is active. That is, the audio + // adapter that connects to the endpoint device is present and enabled. + DWORD state = DEVICE_STATE_DISABLED; + hr = endpoint_device->GetState(&state); + if (SUCCEEDED(hr)) { + if (!(state & DEVICE_STATE_ACTIVE)) { + PLOG(ERROR) << "Selected render device is not active."; + endpoint_device.Release(); + } + } + } + + return endpoint_device; +} + +static ScopedComPtr<IAudioClient> CreateAudioClient(IMMDevice* audio_device) { + ScopedComPtr<IAudioClient> audio_client; + + // Creates and activates an IAudioClient COM object given the selected + // endpoint device. 
+ HRESULT hr = audio_device->Activate(__uuidof(IAudioClient), + CLSCTX_INPROC_SERVER, + NULL, + audio_client.ReceiveVoid()); + PLOG_IF(ERROR, FAILED(hr)) << "IMMDevice::Activate: " << std::hex << hr; + return audio_client; +} + +static bool IsFormatSupported(IAudioClient* audio_client, + WAVEFORMATPCMEX* format) { + ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match; + HRESULT hr = audio_client->IsFormatSupported( + AUDCLNT_SHAREMODE_SHARED, reinterpret_cast<WAVEFORMATEX*>(format), + reinterpret_cast<WAVEFORMATEX**>(&closest_match)); + + // This log can only be triggered for shared mode. + DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " + << "but a closest match exists."; + // This log can be triggered both for shared and exclusive modes. + DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format."; + if (hr == S_FALSE) { + DVLOG(1) << "wFormatTag : " << closest_match->Format.wFormatTag; + DVLOG(1) << "nChannels : " << closest_match->Format.nChannels; + DVLOG(1) << "nSamplesPerSec: " << closest_match->Format.nSamplesPerSec; + DVLOG(1) << "wBitsPerSample: " << closest_match->Format.wBitsPerSample; + } + + return (hr == S_OK); +} + +// Get the default scheduling period for a shared-mode stream in a specified +// direction. Note that the period between processing passes by the audio +// engine is fixed for a particular audio endpoint device and represents the +// smallest processing quantum for the audio engine. +static REFERENCE_TIME GetAudioEngineDevicePeriod(EDataFlow data_flow) { + ScopedComPtr<IMMDevice> endpoint_device = CreateDefaultAudioDevice(data_flow); + if (!endpoint_device) + return 0; + + ScopedComPtr<IAudioClient> audio_client; + audio_client = CreateAudioClient(endpoint_device); + if (!audio_client) + return 0; + + REFERENCE_TIME default_device_period = 0; + REFERENCE_TIME minimum_device_period = 0; + + // Times are expressed in 100-nanosecond units. + HRESULT hr = audio_client->GetDevicePeriod(&default_device_period, + &minimum_device_period); + if (SUCCEEDED(hr)) { + std::string flow = (data_flow == eCapture) ? "[in] " : "[out] "; + DVLOG(1) << flow << "default_device_period: " << default_device_period; + DVLOG(1) << flow << "minimum_device_period: " << minimum_device_period; + + return default_device_period; + } + + return 0; +} + +WASAPIUnifiedStream::WASAPIUnifiedStream(AudioManagerWin* manager, + const AudioParameters& params) + : creating_thread_id_(base::PlatformThread::CurrentId()), + manager_(manager), + audio_io_thread_(NULL), + opened_(false), + endpoint_render_buffer_size_frames_(0), + endpoint_capture_buffer_size_frames_(0), + source_(NULL), + capture_bus_(AudioBus::Create(params)), + render_bus_(AudioBus::Create(params)) { + DCHECK(manager_); + + LOG_IF(ERROR, !HasUnifiedDefaultIO()) + << "Unified audio I/O is not supported."; + + // Load the Avrt DLL if not already loaded. Required to support MMCSS. + bool avrt_init = avrt::Initialize(); + DCHECK(avrt_init) << "Failed to load the avrt.dll"; + + // Begin with the WAVEFORMATEX structure that specifies the basic format. 
+ WAVEFORMATEX* format = &format_.Format; + format->wFormatTag = WAVE_FORMAT_EXTENSIBLE; + format->nChannels = params.channels(); + format->nSamplesPerSec = params.sample_rate(); + format->wBitsPerSample = params.bits_per_sample(); + format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels; + format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign; + format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX); + + // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE. + format_.Samples.wValidBitsPerSample = params.bits_per_sample(); + format_.dwChannelMask = KSAUDIO_SPEAKER_STEREO; + format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; + + // Store size (in different units) of audio packets which we expect to + // get from the audio endpoint device in each render event. + packet_size_frames_ = params.GetBytesPerBuffer() / format->nBlockAlign; + float packet_size_ms = (1000.0 * packet_size_frames_) / params.sample_rate(); + DVLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign; + DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; + DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms; + + // All events are auto-reset events and non-signaled initially. + + // Create the event which the audio engine will signal each time a buffer + // has been recorded. + capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); + + // Create the event which will be set in Stop() when straeming shall stop. + stop_streaming_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); +} + +WASAPIUnifiedStream::~WASAPIUnifiedStream() { +} + +bool WASAPIUnifiedStream::Open() { + DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); + if (opened_) + return true; + + if (!HasUnifiedDefaultIO()) { + LOG(ERROR) << "Unified audio I/O is not supported."; + return false; + } + + // Render side: + // IMMDeviceEnumerator -> IMMDevice + // IMMDevice -> IAudioClient + // IAudioClient -> IAudioRenderClient + + ScopedComPtr<IMMDevice> render_device = CreateDefaultAudioDevice(eRender); + if (!render_device) + return false; + + ScopedComPtr<IAudioClient> audio_output_client = + CreateAudioClient(render_device); + if (!audio_output_client) + return false; + + if (!IsFormatSupported(audio_output_client, &format_)) + return false; + + ScopedComPtr<IAudioRenderClient> audio_render_client = + CreateAudioRenderClient(audio_output_client); + if (!audio_render_client) + return false; + + // Capture side: + // IMMDeviceEnumerator -> IMMDevice + // IMMDevice -> IAudioClient + // IAudioClient -> IAudioCaptureClient + + ScopedComPtr<IMMDevice> capture_device = CreateDefaultAudioDevice(eCapture); + if (!capture_device) + return false; + + ScopedComPtr<IAudioClient> audio_input_client = + CreateAudioClient(capture_device); + if (!audio_input_client) + return false; + + if (!IsFormatSupported(audio_input_client, &format_)) + return false; + + ScopedComPtr<IAudioCaptureClient> audio_capture_client = + CreateAudioCaptureClient(audio_input_client); + if (!audio_capture_client) + return false; + + // Set the event handle that the audio engine will signal each time + // a buffer becomes ready to be processed by the client. + HRESULT hr = audio_input_client->SetEventHandle(capture_event_.Get()); + if (FAILED(hr)) + return false; + + // Store all valid COM interfaces. 
+ audio_output_client_ = audio_output_client; + audio_render_client_ = audio_render_client; + audio_input_client_ = audio_input_client; + audio_capture_client_ = audio_capture_client; + + opened_ = true; + return SUCCEEDED(hr); +} + +void WASAPIUnifiedStream::Start(AudioSourceCallback* callback) { + DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); + CHECK(callback); + CHECK(opened_); + + if (audio_io_thread_.get()) { + CHECK_EQ(callback, source_); + return; + } + + source_ = callback; + + // Avoid start-up glitches by filling up the endpoint buffer with "silence" + // before starting the stream. + BYTE* data_ptr = NULL; + HRESULT hr = audio_render_client_->GetBuffer( + endpoint_render_buffer_size_frames_, &data_ptr); + if (FAILED(hr)) { + DLOG(ERROR) << "Failed to use rendering audio buffer: " << std::hex << hr; + return; + } + + // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to + // explicitly write silence data to the rendering buffer. + audio_render_client_->ReleaseBuffer(endpoint_render_buffer_size_frames_, + AUDCLNT_BUFFERFLAGS_SILENT); + + // Sanity check: verify that the endpoint buffer is filled with silence. + UINT32 num_queued_frames = 0; + audio_output_client_->GetCurrentPadding(&num_queued_frames); + DCHECK(num_queued_frames == endpoint_render_buffer_size_frames_); + + // Create and start the thread that will capturing and rendering. + audio_io_thread_.reset( + new base::DelegateSimpleThread(this, "wasapi_io_thread")); + audio_io_thread_->Start(); + if (!audio_io_thread_->HasBeenStarted()) { + DLOG(ERROR) << "Failed to start WASAPI IO thread."; + return; + } + + // Start input streaming data between the endpoint buffer and the audio + // engine. + hr = audio_input_client_->Start(); + if (FAILED(hr)) { + StopAndJoinThread(hr); + return; + } + + // Start output streaming data between the endpoint buffer and the audio + // engine. + hr = audio_output_client_->Start(); + if (FAILED(hr)) { + StopAndJoinThread(hr); + return; + } +} + +void WASAPIUnifiedStream::Stop() { + DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); + if (!audio_io_thread_.get()) + return; + + // Stop input audio streaming. + HRESULT hr = audio_input_client_->Stop(); + if (FAILED(hr)) { + DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) + << "Failed to stop input streaming: " << std::hex << hr; + } + + // Stop output audio streaming. + hr = audio_output_client_->Stop(); + if (FAILED(hr)) { + DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) + << "Failed to stop output streaming: " << std::hex << hr; + } + + // Wait until the thread completes and perform cleanup. + SetEvent(stop_streaming_event_.Get()); + audio_io_thread_->Join(); + audio_io_thread_.reset(); + + // Ensure that we don't quit the main thread loop immediately next + // time Start() is called. + ResetEvent(stop_streaming_event_.Get()); + + // Clear source callback, it'll be set again on the next Start() call. + source_ = NULL; + + // Flush all pending data and reset the audio clock stream position to 0. + hr = audio_output_client_->Reset(); + if (FAILED(hr)) { + DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) + << "Failed to reset output streaming: " << std::hex << hr; + } + + audio_input_client_->Reset(); + if (FAILED(hr)) { + DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) + << "Failed to reset input streaming: " << std::hex << hr; + } + + // Extra safety check to ensure that the buffers are cleared. 
+ // If the buffers are not cleared correctly, the next call to Start() + // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). + // This check is is only needed for shared-mode streams. + UINT32 num_queued_frames = 0; + audio_output_client_->GetCurrentPadding(&num_queued_frames); + DCHECK_EQ(0u, num_queued_frames); +} + +void WASAPIUnifiedStream::Close() { + DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); + + // It is valid to call Close() before calling open or Start(). + // It is also valid to call Close() after Start() has been called. + Stop(); + + // Inform the audio manager that we have been closed. This will cause our + // destruction. + manager_->ReleaseOutputStream(this); +} + +void WASAPIUnifiedStream::SetVolume(double volume) { + NOTIMPLEMENTED(); +} + +void WASAPIUnifiedStream::GetVolume(double* volume) { + NOTIMPLEMENTED(); +} + +// static +bool WASAPIUnifiedStream::HasUnifiedDefaultIO() { + int output_size = HardwareBufferSize(eRender); + int input_size = HardwareBufferSize(eCapture); + int output_channels = HardwareChannelCount(eRender); + int input_channels = HardwareChannelCount(eCapture); + return ((output_size == input_size) && (output_channels == input_channels)); +} + +// static +int WASAPIUnifiedStream::HardwareChannelCount(EDataFlow data_flow) { + base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex; + HRESULT hr = GetMixFormat( + data_flow, reinterpret_cast<WAVEFORMATEX**>(&format_ex)); + if (FAILED(hr)) + return 0; + + // Number of channels in the stream. Corresponds to the number of bits + // set in the dwChannelMask. + std::string flow = (data_flow == eCapture) ? "[in] " : "[out] "; + DVLOG(1) << flow << "endpoint channels: " + << format_ex->Format.nChannels; + + return static_cast<int>(format_ex->Format.nChannels); +} + +// static +int WASAPIUnifiedStream::HardwareSampleRate(EDataFlow data_flow) { + base::win::ScopedCoMem<WAVEFORMATEX> format; + HRESULT hr = GetMixFormat(data_flow, &format); + if (FAILED(hr)) + return 0; + + std::string flow = (data_flow == eCapture) ? "[in] " : "[out] "; + DVLOG(1) << flow << "nSamplesPerSec: " << format->nSamplesPerSec; + return static_cast<int>(format->nSamplesPerSec); +} + +// static +int WASAPIUnifiedStream::HardwareBufferSize(EDataFlow data_flow) { + int sample_rate = HardwareSampleRate(data_flow); + if (sample_rate == 0) + return 0; + + // Number of 100-nanosecond units per second. + const float kRefTimesPerSec = 10000000.0f; + + // A typical value of |device_period| is 100000 which corresponds to + // 0.01 seconds or 10 milliseconds. Given a sample rate of 48000 Hz, + // this device period results in a |buffer_size| of 480 audio frames. + REFERENCE_TIME device_period = GetAudioEngineDevicePeriod(data_flow); + int buffer_size = static_cast<int>( + ((sample_rate * device_period) / kRefTimesPerSec) + 0.5); + std::string flow = (data_flow == eCapture) ? "[in] " : "[out] "; + DVLOG(1) << flow << "buffer size: " << buffer_size; + + return buffer_size; +} + +void WASAPIUnifiedStream::Run() { + ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); + + // Increase the thread priority. + audio_io_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); + + // Enable MMCSS to ensure that this thread receives prioritized access to + // CPU resources. + // TODO(henrika): investigate if it is possible to include these additional + // settings in SetThreadPriority() as well. 
+ DWORD task_index = 0; + HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", + &task_index); + bool mmcss_is_ok = + (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); + if (!mmcss_is_ok) { + // Failed to enable MMCSS on this thread. It is not fatal but can lead + // to reduced QoS at high load. + DWORD err = GetLastError(); + LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; + } + + HRESULT hr = S_FALSE; + + bool streaming = true; + bool error = false; + HANDLE wait_array[] = { stop_streaming_event_, + capture_event_ }; + + const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; + + // Keep streaming audio until the stop, or error, event is signaled. + // The current implementation uses capture events as driving mechanism since + // extensive testing has shown that it gives us a more reliable callback + // sequence compared with a scheme where both capture and render events are + // utilized. + while (streaming && !error) { + // Wait for a close-down event, or a new capture event. + DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array), + wait_array, + FALSE, + INFINITE); + switch (wait_result) { + case WAIT_OBJECT_0 + 0: + // |stop_streaming_event_| has been set. + streaming = false; + break; + case WAIT_OBJECT_0 + 1: + // |capture_event_| has been set + { + TRACE_EVENT0("audio", "WASAPIUnifiedStream::Run"); + + // --- Capture --- + + BYTE* data_ptr = NULL; + UINT32 num_captured_frames = 0; + DWORD flags = 0; + UINT64 device_position = 0; + UINT64 first_audio_frame_timestamp = 0; + + // Retrieve the amount of data in the capture endpoint buffer. + hr = audio_capture_client_->GetBuffer(&data_ptr, + &num_captured_frames, + &flags, + &device_position, + &first_audio_frame_timestamp); + if (FAILED(hr)) { + DLOG(ERROR) << "Failed to get data from the capture buffer"; + continue; + } + + if (num_captured_frames != 0) { + if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { + // Clear out the capture buffer since silence is reported. + capture_bus_->Zero(); + } else { + // Store captured data in an audio bus after de-interleaving + // the data to match the audio bus structure. + capture_bus_->FromInterleaved( + data_ptr, num_captured_frames, bytes_per_sample); + } + } + + hr = audio_capture_client_->ReleaseBuffer(num_captured_frames); + DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; + + // Prepare for rendering by calling OnMoreIOData(). + int frames_filled = source_->OnMoreIOData( + capture_bus_.get(), + render_bus_.get(), + AudioBuffersState(0, 0)); + DCHECK_EQ(frames_filled, render_bus_->frames()); + + // --- Render --- + + // Derive the the amount of available space in the endpoint buffer. + // Avoid render attempt if there is no room for a captured packet. + UINT32 num_queued_frames = 0; + audio_output_client_->GetCurrentPadding(&num_queued_frames); + if (endpoint_render_buffer_size_frames_ - num_queued_frames < + packet_size_frames_) + continue; + + // Grab all available space in the rendering endpoint buffer + // into which the client can write a data packet. + uint8* audio_data = NULL; + hr = audio_render_client_->GetBuffer(packet_size_frames_, + &audio_data); + if (FAILED(hr)) { + DLOG(ERROR) << "Failed to access render buffer"; + continue; + } + + // Convert the audio bus content to interleaved integer data using + // |audio_data| as destination. + render_bus_->ToInterleaved( + packet_size_frames_, bytes_per_sample, audio_data); + + // Release the buffer space acquired in the GetBuffer() call. 
+ audio_render_client_->ReleaseBuffer(packet_size_frames_, 0); + DLOG_IF(ERROR, FAILED(hr)) << "Failed to release render buffer"; + } + break; + default: + error = true; + break; + } + } + + if (streaming && error) { + // Stop audio streaming since something has gone wrong in our main thread + // loop. Note that, we are still in a "started" state, hence a Stop() call + // is required to join the thread properly. + audio_input_client_->Stop(); + audio_output_client_->Stop(); + PLOG(ERROR) << "WASAPI streaming failed."; + } + + // Disable MMCSS. + if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { + PLOG(WARNING) << "Failed to disable MMCSS"; + } +} + +void WASAPIUnifiedStream::HandleError(HRESULT err) { + CHECK((started() && GetCurrentThreadId() == audio_io_thread_->tid()) || + (!started() && GetCurrentThreadId() == creating_thread_id_)); + NOTREACHED() << "Error code: " << std::hex << err; + if (source_) + source_->OnError(this, static_cast<int>(err)); +} + +void WASAPIUnifiedStream::StopAndJoinThread(HRESULT err) { + CHECK(GetCurrentThreadId() == creating_thread_id_); + DCHECK(audio_io_thread_.get()); + SetEvent(stop_streaming_event_.Get()); + audio_io_thread_->Join(); + audio_io_thread_.reset(); + HandleError(err); +} + +ScopedComPtr<IAudioRenderClient> WASAPIUnifiedStream::CreateAudioRenderClient( + IAudioClient* audio_client) { + ScopedComPtr<IAudioRenderClient> audio_render_client; + HRESULT hr = S_FALSE; + + // Initialize the audio stream between the client and the device in shared + // push mode (will not signal an event). + hr = audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, + AUDCLNT_STREAMFLAGS_NOPERSIST, + 0, + 0, + reinterpret_cast<WAVEFORMATEX*>(&format_), + NULL); + if (FAILED(hr)) { + LOG(WARNING) << "IAudioClient::Initialize() failed: " << std::hex << hr; + return audio_render_client; + } + + // Retrieve the length of the render endpoint buffer shared between the + // client and the audio engine. + hr = audio_client->GetBufferSize(&endpoint_render_buffer_size_frames_); + if (FAILED(hr)) + return audio_render_client; + DVLOG(1) << "render endpoint buffer size: " + << endpoint_render_buffer_size_frames_ << " [frames]"; + + // Get access to the IAudioRenderClient interface. This interface + // enables us to write output data to a rendering endpoint buffer. + hr = audio_client->GetService(__uuidof(IAudioRenderClient), + audio_render_client.ReceiveVoid()); + if (FAILED(hr)) { + LOG(WARNING) << "IAudioClient::GetService() failed: " << std::hex << hr; + return audio_render_client; + } + + return audio_render_client; +} + +ScopedComPtr<IAudioCaptureClient> +WASAPIUnifiedStream::CreateAudioCaptureClient(IAudioClient* audio_client) { + ScopedComPtr<IAudioCaptureClient> audio_capture_client; + HRESULT hr = S_FALSE; + + // Use event driven audio-buffer processing, i.e, the audio engine will + // inform us (by signaling an event) when data has been recorded. + hr = audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, + AUDCLNT_STREAMFLAGS_EVENTCALLBACK | + AUDCLNT_STREAMFLAGS_NOPERSIST, + 0, + 0, + reinterpret_cast<WAVEFORMATEX*>(&format_), + NULL); + if (FAILED(hr)) + return audio_capture_client; + + // Retrieve the length of the capture endpoint buffer shared between the + // client and the audio engine. 
+ hr = audio_client->GetBufferSize(&endpoint_capture_buffer_size_frames_); + if (FAILED(hr)) + return audio_capture_client; + DVLOG(1) << "capture endpoint buffer size: " + << endpoint_capture_buffer_size_frames_ << " [frames]"; + + // Get access to the IAudioCaptureClient interface. This interface + // enables us to read input data from the capture endpoint buffer. + hr = audio_client->GetService(__uuidof(IAudioCaptureClient), + audio_capture_client.ReceiveVoid()); + + return audio_capture_client; +} + +} // namespace media diff --git a/media/audio/win/audio_unified_win.h b/media/audio/win/audio_unified_win.h new file mode 100644 index 0000000..30dc609 --- /dev/null +++ b/media/audio/win/audio_unified_win.h @@ -0,0 +1,192 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
+#define MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
+
+#include <Audioclient.h>
+#include <MMDeviceAPI.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_comptr.h"
+#include "base/win/scoped_handle.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioManagerWin;
+
+// Implementation of AudioOutputStream for Windows using the WASAPI Core
+// Audio interface where both capturing and rendering take place on the
+// same thread to enable audio I/O.
+//
+// Best performance is achieved by using a buffer size given by the static
+// HardwareBufferSize() method. The user should also ensure that audio I/O
+// is supported by calling HasUnifiedDefaultIO().
+//
+// Implementation notes:
+//
+// - Certain conditions must be fulfilled to support audio I/O:
+// o Both capture and render side must use the same sample rate.
+// o Both capture and render side must use the same channel count.
+// o See HasUnifiedDefaultIO() for more details.
+//
+// TODO(henrika):
+//
+// - Add multi-channel support.
+// - Add support for non-matching sample rates.
+// - Add support for exclusive mode.
+//
+class MEDIA_EXPORT WASAPIUnifiedStream
+ : public AudioOutputStream,
+ public base::DelegateSimpleThread::Delegate {
+ public:
+ // The ctor takes all the usual parameters, plus |manager|, which is the
+ // audio manager creating this object.
+ WASAPIUnifiedStream(AudioManagerWin* manager,
+ const AudioParameters& params);
+
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioOutputStream::Close().
+ virtual ~WASAPIUnifiedStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ // Returns true if all conditions to support audio I/O are fulfilled.
+ // Input and output sides of the Audio Engine must use the same native
+ // device period (requires e.g. identical sample rates) and have the same
+ // channel count.
+ static bool HasUnifiedDefaultIO();
+
+ // Retrieves the number of channels the audio engine uses for its internal
+ // processing/mixing of shared-mode streams for the default endpoint device
+ // and in the given direction.
+ static int HardwareChannelCount(EDataFlow data_flow);
+
+ // Retrieves the sample rate the audio engine uses for its internal
+ // processing/mixing of shared-mode streams for the default endpoint device
+ // and in the given direction.
+ static int HardwareSampleRate(EDataFlow data_flow);
+
+ // Retrieves the preferred buffer size for the default endpoint device and
+ // in the given direction. The recommended size is given by the mixing
+ // sample rate and the native device period for the audio device.
+ // Unit is in number of audio frames.
+ // Examples:
+ // fs = 96000 Hz => 960
+ // fs = 48000 Hz => 480
+ // fs = 44100 Hz => 441 or 448 (depends on the audio hardware)
+ static int HardwareBufferSize(EDataFlow data_flow);
+
+ bool started() const {
+ return audio_io_thread_.get() != NULL;
+ }
+
+ private:
+ // DelegateSimpleThread::Delegate implementation.
+ virtual void Run() OVERRIDE;
+
+ // Issues the OnError() callback to the |source_|.
+ void HandleError(HRESULT err);
+
+ // Stops and joins the audio thread in case of an error.
+ void StopAndJoinThread(HRESULT err);
+
+ // Helper methods which use an IAudioClient to create and set up
+ // IAudio[Render|Capture]Clients.
+ base::win::ScopedComPtr<IAudioRenderClient> CreateAudioRenderClient(
+ IAudioClient* audio_client);
+ base::win::ScopedComPtr<IAudioCaptureClient> CreateAudioCaptureClient(
+ IAudioClient* audio_client);
+
+ // Converts a unique endpoint ID to a user-friendly device name.
+ std::string GetDeviceName(LPCWSTR device_id) const;
+
+ // Returns the number of channels the audio engine uses for its internal
+ // processing/mixing of shared-mode streams for the default endpoint device.
+ int endpoint_channel_count() { return format_.Format.nChannels; }
+
+ // Contains the thread ID of the creating thread.
+ base::PlatformThreadId creating_thread_id_;
+
+ // Our creator. The audio manager needs to be notified when we close.
+ AudioManagerWin* manager_;
+
+ // Rendering and capturing are driven by this thread (no message loop).
+ // All OnMoreIOData() callbacks will be called from this thread.
+ scoped_ptr<base::DelegateSimpleThread> audio_io_thread_;
+
+ // Contains the desired audio format which is set up at construction.
+ // Extended PCM waveform format structure based on WAVEFORMATEXTENSIBLE.
+ // Use this for multiple channel and hi-resolution PCM data.
+ WAVEFORMATPCMEX format_;
+
+ // True when successfully opened.
+ bool opened_;
+
+ // Size in bytes of each audio frame (4 bytes for 16-bit stereo PCM).
+ size_t frame_size_;
+
+ // Size in audio frames of each audio packet, where an audio packet is
+ // defined as the block of data which the source is expected to deliver in
+ // each OnMoreIOData() callback.
+ size_t packet_size_frames_;
+
+ // Lengths of the render and capture endpoint buffers (in audio frames).
+ size_t endpoint_render_buffer_size_frames_;
+ size_t endpoint_capture_buffer_size_frames_;
+
+ // Pointer to the client that will deliver audio samples to be played out.
+ AudioSourceCallback* source_;
+
+ // IMMDevice interfaces which represent audio endpoint devices.
+ base::win::ScopedComPtr<IMMDevice> endpoint_render_device_;
+ base::win::ScopedComPtr<IMMDevice> endpoint_capture_device_;
+
+ // IAudioClient interfaces which enable a client to create and initialize
+ // an audio stream between an audio application and the audio engine.
+ base::win::ScopedComPtr<IAudioClient> audio_output_client_;
+ base::win::ScopedComPtr<IAudioClient> audio_input_client_;
+
+ // The IAudioRenderClient interface enables a client to write output
+ // data to a rendering endpoint buffer.
+ base::win::ScopedComPtr<IAudioRenderClient> audio_render_client_;
+
+ // The IAudioCaptureClient interface enables a client to read input
+ // data from a capturing endpoint buffer.
+ base::win::ScopedComPtr<IAudioCaptureClient> audio_capture_client_;
+
+ // The audio engine will signal this event each time a buffer has been
+ // recorded.
+ base::win::ScopedHandle capture_event_;
+
+ // This event will be signaled when streaming shall stop.
+ base::win::ScopedHandle stop_streaming_event_;
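+ // The audio thread is expected to wait on both events above, e.g. using
+ // WaitForMultipleObjects(), and to process one packet of audio for each
+ // signaled capture event until |stop_streaming_event_| is set (see Run()).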
+
+ // Container for retrieving data from AudioSourceCallback::OnMoreIOData().
+ scoped_ptr<AudioBus> render_bus_;
+
+ // Container for sending data to AudioSourceCallback::OnMoreIOData().
+ scoped_ptr<AudioBus> capture_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(WASAPIUnifiedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
diff --git a/media/audio/win/audio_unified_win_unittest.cc b/media/audio/win/audio_unified_win_unittest.cc
new file mode 100644
index 0000000..952fe2d
--- /dev/null
+++ b/media/audio/win/audio_unified_win_unittest.cc
@@ -0,0 +1,280 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/command_line.h"
+#include "base/file_util.h"
+#include "base/message_loop.h"
+#include "base/path_service.h"
+#include "base/test/test_timeouts.h"
+#include "base/time.h"
+#include "base/win/scoped_com_initializer.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/win/audio_unified_win.h"
+#include "media/base/media_switches.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Between;
+using ::testing::DoAll;
+using ::testing::NotNull;
+using ::testing::Return;
+using base::win::ScopedCOMInitializer;
+
+namespace media {
+
+static const size_t kMaxDeltaSamples = 1000;
+static const char* kDeltaTimeMsFileName = "unified_delta_times_ms.txt";
+
+// Used to terminate a loop from a different thread than the one on which
+// the loop runs. |loop| should be a MessageLoopProxy.
+ACTION_P(QuitLoop, loop) {
+ loop->PostTask(FROM_HERE, MessageLoop::QuitClosure());
+}
+
+class MockUnifiedSourceCallback
+ : public AudioOutputStream::AudioSourceCallback {
+ public:
+ MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD2(OnError, void(AudioOutputStream* stream, int code));
+};
+
+// AudioOutputStream::AudioSourceCallback implementation which enables audio
+// play-through. It also creates a text file containing the elapsed time
+// between two successive callbacks, in milliseconds. This file can be used
+// for off-line analysis of the callback sequence.
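+// Each logged delta should be close to the packet duration, e.g. roughly
+// 10 ms for a 480-frame packet at 48 kHz.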
+class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+ UnifiedSourceCallback()
+ : previous_call_time_(base::Time::Now()),
+ text_file_(NULL),
+ elements_to_write_(0) {
+ delta_times_.reset(new int[kMaxDeltaSamples]);
+ }
+
+ virtual ~UnifiedSourceCallback() {
+ FilePath file_name;
+ EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
+ file_name = file_name.AppendASCII(kDeltaTimeMsFileName);
+
+ EXPECT_TRUE(!text_file_);
+ text_file_ = file_util::OpenFile(file_name, "wt");
+ DLOG_IF(ERROR, !text_file_) << "Failed to open log file.";
+
+ // Write the array which contains the delta times to the text file, but
+ // only if it was successfully opened; writing to a NULL FILE* would crash.
+ if (text_file_) {
+ LOG(INFO) << ">> Output file " << file_name.value() << " has been created.";
+ size_t elements_written = 0;
+ while (elements_written < elements_to_write_) {
+ fprintf(text_file_, "%d\n", delta_times_[elements_written]);
+ ++elements_written;
+ }
+ file_util::CloseFile(text_file_);
+ }
+ }
+
+ virtual int OnMoreData(AudioBus* dest,
+ AudioBuffersState buffers_state) {
+ NOTREACHED();
+ return 0;
+ }
+
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) {
+ // Store time between this callback and the previous callback.
+ int diff = (base::Time::Now() - previous_call_time_).InMilliseconds();
+ previous_call_time_ = base::Time::Now();
+ if (elements_to_write_ < kMaxDeltaSamples) {
+ delta_times_[elements_to_write_] = diff;
+ ++elements_to_write_;
+ }
+
+ // Play out the recorded audio samples in loopback.
+ source->CopyTo(dest);
+ return source->frames();
+ }
+
+ virtual void OnError(AudioOutputStream* stream, int code) {
+ NOTREACHED();
+ }
+
+ private:
+ base::Time previous_call_time_;
+ scoped_array<int> delta_times_;
+ FILE* text_file_;
+ size_t elements_to_write_;
+};
+
+// Convenience method which verifies that all conditions required to run the
+// unified audio tests on Windows are fulfilled.
+static bool CanRunUnifiedAudioTests(AudioManager* audio_man) {
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ if (!cmd_line->HasSwitch(switches::kEnableWebAudioInput)) {
+ DVLOG(1) << "--enable-webaudio-input must be defined to run this test.";
+ return false;
+ }
+
+ if (!media::IsWASAPISupported()) {
+ LOG(WARNING) << "This tests requires Windows Vista or higher.";
+ return false;
+ }
+
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output devices detected.";
+ return false;
+ }
+
+ if (!audio_man->HasAudioInputDevices()) {
+ LOG(WARNING) << "No input devices detected.";
+ return false;
+ }
+
+ if (!WASAPIUnifiedStream::HasUnifiedDefaultIO()) {
+ LOG(WARNING) << "Audio IO is not supported.";
+ return false;
+ }
+
+ return true;
+}
+
+// Convenience class which simplifies creation of a unified AudioOutputStream
+// object.
+class AudioUnifiedStreamWrapper {
+ public:
+ explicit AudioUnifiedStreamWrapper(AudioManager* audio_manager)
+ : com_init_(ScopedCOMInitializer::kMTA),
+ audio_man_(audio_manager),
+ format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
+ channel_layout_(CHANNEL_LAYOUT_STEREO),
+ bits_per_sample_(16) {
+ sample_rate_ = media::GetAudioHardwareSampleRate();
+ samples_per_packet_ = media::GetAudioHardwareBufferSize();
+ }
+
+ ~AudioUnifiedStreamWrapper() {}
+
+ // Creates AudioOutputStream object using default parameters.
+ WASAPIUnifiedStream* Create() {
+ return static_cast<WASAPIUnifiedStream*>(CreateOutputStream());
+ }
+
+ AudioParameters::Format format() const { return format_; }
+ int channels() const { return ChannelLayoutToChannelCount(channel_layout_); }
+ int bits_per_sample() const { return bits_per_sample_; }
+ int sample_rate() const { return sample_rate_; }
+ int samples_per_packet() const { return samples_per_packet_; }
+
+ private:
+ AudioOutputStream* CreateOutputStream() {
+ AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(
+ AudioParameters(format_, channel_layout_, sample_rate_,
+ bits_per_sample_, samples_per_packet_));
+ EXPECT_TRUE(aos);
+ return aos;
+ }
+
+ ScopedCOMInitializer com_init_;
+ AudioManager* audio_man_;
+
+ AudioParameters::Format format_;
+ ChannelLayout channel_layout_;
+ int bits_per_sample_;
+ int sample_rate_;
+ int samples_per_packet_;
+};
+
+// Convenience method which creates a default WASAPIUnifiedStream object.
+static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
+ AudioManager* audio_manager) {
+ AudioUnifiedStreamWrapper aosw(audio_manager);
+ return aosw.Create();
+}
+
+// Test Open(), Close() calling sequence.
+TEST(WASAPIUnifiedStreamTest, OpenAndClose) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunUnifiedAudioTests(audio_manager.get()))
+ return;
+
+ WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
+ EXPECT_TRUE(wus->Open());
+ wus->Close();
+}
+
+// Test Open(), Start(), Close() calling sequence.
+TEST(WASAPIUnifiedStreamTest, OpenStartAndClose) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunUnifiedAudioTests(audio_manager.get()))
+ return;
+
+ MockUnifiedSourceCallback source;
+ AudioUnifiedStreamWrapper ausw(audio_manager.get());
+ WASAPIUnifiedStream* wus = ausw.Create();
+
+ EXPECT_TRUE(wus->Open());
+ EXPECT_CALL(source, OnError(wus, _))
+ .Times(0);
+ EXPECT_CALL(source, OnMoreIOData(NotNull(), NotNull(), _))
+ .Times(Between(0, 1))
+ .WillOnce(Return(ausw.samples_per_packet()));
+ wus->Start(&source);
+ wus->Close();
+}
+
+// Verify that I/O callbacks start as they should.
+TEST(WASAPIUnifiedStreamTest, StartLoopbackAudio) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunUnifiedAudioTests(audio_manager.get()))
+ return;
+
+ MessageLoopForUI loop;
+ MockUnifiedSourceCallback source;
+ AudioUnifiedStreamWrapper ausw(audio_manager.get());
+ WASAPIUnifiedStream* wus = ausw.Create();
+
+ EXPECT_TRUE(wus->Open());
+ EXPECT_CALL(source, OnError(wus, _))
+ .Times(0);
+ EXPECT_CALL(source, OnMoreIOData(NotNull(), NotNull(), _))
+ .Times(AtLeast(2))
+ .WillOnce(Return(ausw.samples_per_packet()))
+ .WillOnce(DoAll(
+ QuitLoop(loop.message_loop_proxy()),
+ Return(ausw.samples_per_packet())));
+ wus->Start(&source);
+ loop.PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
+ TestTimeouts::action_timeout());
+ loop.Run();
+ wus->Stop();
+ wus->Close();
+}
+
+// Perform a real-time test in loopback where the recorded audio is echoed
+// back to the speaker. This test allows the user to verify that the audio
+// sounds OK. A text file with name |kDeltaTimeMsFileName| is also generated.
+TEST(WASAPIUnifiedStreamTest, DISABLED_RealTimePlayThrough) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunUnifiedAudioTests(audio_manager.get()))
+ return;
+
+ MessageLoopForUI loop;
+ UnifiedSourceCallback source;
+ WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
+
+ EXPECT_TRUE(wus->Open());
+ wus->Start(&source);
+ loop.PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
+ base::TimeDelta::FromMilliseconds(10000));
+ loop.Run();
+ wus->Close();
+}
+
+} // namespace media
diff --git a/media/media.gyp b/media/media.gyp
index 51a8f17..3168c33
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -139,6 +139,8 @@
         'audio/win/audio_low_latency_output_win.h',
         'audio/win/audio_manager_win.cc',
         'audio/win/audio_manager_win.h',
+        'audio/win/audio_unified_win.cc',
+        'audio/win/audio_unified_win.h',
         'audio/win/avrt_wrapper_win.cc',
         'audio/win/avrt_wrapper_win.h',
         'audio/win/device_enumeration_win.cc',
@@ -605,6 +607,7 @@
         'audio/win/audio_low_latency_input_win_unittest.cc',
         'audio/win/audio_low_latency_output_win_unittest.cc',
         'audio/win/audio_output_win_unittest.cc',
+        'audio/win/audio_unified_win_unittest.cc',
         'base/audio_bus_unittest.cc',
         'base/audio_fifo_unittest.cc',
         'base/audio_pull_fifo_unittest.cc',