diff options
author | henrika@chromium.org <henrika@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2011-10-24 09:41:12 +0000 |
---|---|---|
committer | henrika@chromium.org <henrika@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2011-10-24 09:41:12 +0000 |
commit | f5968e6ada72f515d7d183e67e35f90d7704daee (patch) | |
tree | 28f4fcbe9850f52dc7196c5a1db394b0aa2d577a /media | |
parent | a80a258880e3ababe9f806dd9bb4d60ad90473ae (diff) | |
download | chromium_src-f5968e6ada72f515d7d183e67e35f90d7704daee.zip chromium_src-f5968e6ada72f515d7d183e67e35f90d7704daee.tar.gz chromium_src-f5968e6ada72f515d7d183e67e35f90d7704daee.tar.bz2 |
Low-latency AudioInputStream implementation based on WASAPI for Windows.
Requires Windows Vista or higher.
BUG=none
TEST=Attached unit test (requires the CHROME_HEADLESS environment variable to be undefined)
Review URL: http://codereview.chromium.org/8283032
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@106899 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r-- | media/audio/audio_util.cc | 32 | ||||
-rw-r--r-- | media/audio/audio_util.h | 5 | ||||
-rw-r--r-- | media/audio/win/audio_low_latency_input_win.cc | 511 | ||||
-rw-r--r-- | media/audio/win/audio_low_latency_input_win.h | 182 | ||||
-rw-r--r-- | media/audio/win/audio_low_latency_input_win_unittest.cc | 368 | ||||
-rw-r--r-- | media/audio/win/audio_manager_win.cc | 14 | ||||
-rw-r--r-- | media/audio/win/audio_manager_win.h | 8 | ||||
-rw-r--r-- | media/audio/win/avrt_wrapper_win.cc | 64 | ||||
-rw-r--r-- | media/audio/win/avrt_wrapper_win.h | 39 | ||||
-rw-r--r-- | media/media.gyp | 5 |
10 files changed, 1220 insertions, 8 deletions
diff --git a/media/audio/audio_util.cc b/media/audio/audio_util.cc index ed29749f..6f365b2 100644 --- a/media/audio/audio_util.cc +++ b/media/audio/audio_util.cc @@ -14,10 +14,17 @@ #include "base/basictypes.h" #include "base/logging.h" #include "base/shared_memory.h" +#if defined(OS_WIN) +#include "base/win/windows_version.h" +#endif #include "media/audio/audio_util.h" #if defined(OS_MACOSX) +#include "media/audio/mac/audio_low_latency_input_mac.h" #include "media/audio/mac/audio_low_latency_output_mac.h" #endif +#if defined(OS_WIN) +#include "media/audio/win/audio_low_latency_input_win.h" +#endif using base::subtle::Atomic32; @@ -230,8 +237,7 @@ void InterleaveFloatToInt16(const std::vector<float*>& source, } } -double GetAudioHardwareSampleRate() -{ +double GetAudioHardwareSampleRate() { #if defined(OS_MACOSX) // Hardware sample-rate on the Mac can be configured, so we must query. return AUAudioOutputStream::HardwareSampleRate(); @@ -242,6 +248,28 @@ double GetAudioHardwareSampleRate() #endif } +double GetAudioInputHardwareSampleRate() { +#if defined(OS_MACOSX) + // Hardware sample-rate on the Mac can be configured, so we must query. + return AUAudioInputStream::HardwareSampleRate(); +#elif defined(OS_WIN) + if (base::win::GetVersion() <= base::win::VERSION_XP) { + // Fall back to Windows Wave implementation on Windows XP or lower + // and use 48kHz as default input sample rate. + return 48000.0; + } else { + // Hardware sample-rate on Windows can be configured, so we must query. + // TODO(henrika): improve possibility to specify audio endpoint. + // Use the default device (same as for Wave) for now to be compatible. + return WASAPIAudioInputStream::HardwareSampleRate(eConsole); + } +#else + // Hardware for Linux is nearly always 48KHz. + // TODO(henrika): return correct value in rare non-48KHz cases. 
+ return 48000.0; +#endif +} + size_t GetAudioHardwareBufferSize() { // The sizes here were determined by experimentation and are roughly // the lowest value (for low latency) that still allowed glitch-free diff --git a/media/audio/audio_util.h b/media/audio/audio_util.h index 6ea697b..05896af 100644 --- a/media/audio/audio_util.h +++ b/media/audio/audio_util.h @@ -79,9 +79,12 @@ MEDIA_EXPORT void InterleaveFloatToInt16(const std::vector<float*>& source, int16* destination, size_t number_of_frames); -// Returns the default audio hardware sample-rate. +// Returns the default audio output hardware sample-rate. MEDIA_EXPORT double GetAudioHardwareSampleRate(); +// Returns the default audio input hardware sample-rate. +MEDIA_EXPORT double GetAudioInputHardwareSampleRate(); + // Returns the optimal low-latency buffer size for the audio hardware. // This is the smallest buffer size the system can comfortably render // at without glitches. The buffer size is in sample-frames. diff --git a/media/audio/win/audio_low_latency_input_win.cc b/media/audio/win/audio_low_latency_input_win.cc new file mode 100644 index 0000000..5242b67 --- /dev/null +++ b/media/audio/win/audio_low_latency_input_win.cc @@ -0,0 +1,511 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "media/audio/win/audio_low_latency_input_win.h" + +#include "base/logging.h" +#include "base/memory/scoped_ptr.h" +#include "base/utf_string_conversions.h" +#include "media/audio/audio_util.h" +#include "media/audio/win/audio_manager_win.h" +#include "media/audio/win/avrt_wrapper_win.h" + +using base::win::ScopedComPtr; +using base::win::ScopedCOMInitializer; + +WASAPIAudioInputStream::WASAPIAudioInputStream( + AudioManagerWin* manager, const AudioParameters& params, ERole device_role) + : com_init_(ScopedCOMInitializer::kMTA), + manager_(manager), + capture_thread_(NULL), + opened_(false), + started_(false), + endpoint_buffer_size_frames_(0), + device_role_(device_role), + sink_(NULL) { + DCHECK(manager_); + + // Load the Avrt DLL if not already loaded. Required to support MMCSS. + bool avrt_init = avrt::Initialize(); + DCHECK(avrt_init) << "Failed to load the Avrt.dll"; + + // Set up the desired capture format specified by the client. + format_.nSamplesPerSec = params.sample_rate; + format_.wFormatTag = WAVE_FORMAT_PCM; + format_.wBitsPerSample = params.bits_per_sample; + format_.nChannels = params.channels; + format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; + format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; + format_.cbSize = 0; + + // Size in bytes of each audio frame. + frame_size_ = format_.nBlockAlign; + // Store size of audio packets which we expect to get from the audio + // endpoint device in each capture event. + packet_size_frames_ = params.GetPacketSize() / format_.nBlockAlign; + packet_size_bytes_ = params.GetPacketSize(); + DVLOG(1) << "Number of bytes per audio frame : " << frame_size_; + DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; + + // All events are auto-reset events and non-signaled initially. + + // Create the event which the audio engine will signal each time + // a buffer becomes ready to be processed by the client. 
+ audio_samples_ready_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); + DCHECK(audio_samples_ready_event_.IsValid()); + + // Create the event which will be set in Stop() when capturing shall stop. + stop_capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); + DCHECK(stop_capture_event_.IsValid()); + + ms_to_frame_count_ = static_cast<double>(params.sample_rate) / 1000.0; + + LARGE_INTEGER performance_frequency; + if (QueryPerformanceFrequency(&performance_frequency)) { + perf_count_to_100ns_units_ = + (10000000.0 / static_cast<double>(performance_frequency.QuadPart)); + } else { + LOG(ERROR) << "High-resolution performance counters are not supported."; + perf_count_to_100ns_units_ = 0.0; + } +} + +WASAPIAudioInputStream::~WASAPIAudioInputStream() {} + +bool WASAPIAudioInputStream::Open() { + // Verify that we are not already opened. + if (opened_) + return false; + + // Obtain a reference to the IMMDevice interface of the default capturing + // device with the specified role. + HRESULT hr = SetCaptureDevice(device_role_); + if (FAILED(hr)) { + HandleError(hr); + return false; + } + + // Obtain an IAudioClient interface which enables us to create and initialize + // an audio stream between an audio application and the audio engine. + hr = ActivateCaptureDevice(); + if (FAILED(hr)) { + HandleError(hr); + return false; + } + + // Retrieve the stream format which the audio engine uses for its internal + // processing/mixing of shared-mode streams. + hr = GetAudioEngineStreamFormat(); + if (FAILED(hr)) { + HandleError(hr); + return false; + } + + // Verify that the selected audio endpoint supports the specified format + // set during construction. + if (!DesiredFormatIsSupported()) { + hr = E_INVALIDARG; + HandleError(hr); + return false; + } + + // Initialize the audio stream between the client and the device using + // shared mode and a lowest possible glitch-free latency. 
+ hr = InitializeAudioEngine(); + if (FAILED(hr)) { + HandleError(hr); + return false; + } + + opened_ = true; + + return true; +} + +void WASAPIAudioInputStream::Start(AudioInputCallback* callback) { + DCHECK(callback); + DCHECK(opened_); + + if (!opened_) + return; + + if (started_) + return; + + sink_ = callback; + + // Create and start the thread that will drive the capturing by waiting for + // capture events. + capture_thread_ = + new base::DelegateSimpleThread(this, "wasapi_capture_thread"); + capture_thread_->Start(); + + // Start streaming data between the endpoint buffer and the audio engine. + HRESULT hr = audio_client_->Start(); + DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming."; + + started_ = SUCCEEDED(hr); +} + +void WASAPIAudioInputStream::Stop() { + if (!started_) + return; + + // Shut down the capture thread. + if (stop_capture_event_.IsValid()) { + SetEvent(stop_capture_event_.Get()); + } + + // Stop the input audio streaming. + HRESULT hr = audio_client_->Stop(); + if (FAILED(hr)) { + LOG(ERROR) << "Failed to stop input streaming."; + } + + // Wait until the thread completes and perform cleanup. + if (capture_thread_) { + SetEvent(stop_capture_event_.Get()); + capture_thread_->Join(); + capture_thread_ = NULL; + } + + started_ = false; +} + +void WASAPIAudioInputStream::Close() { + // It is valid to call Close() before calling open or Start(). + // It is also valid to call Close() after Start() has been called. + Stop(); + if (sink_) { + sink_->OnClose(this); + sink_ = NULL; + } + + // Inform the audio manager that we have been closed. This will cause our + // destruction. + manager_->ReleaseInputStream(this); +} + +// static +double WASAPIAudioInputStream::HardwareSampleRate(ERole device_role) { + // It is assumed that this static method is called from a COM thread, i.e., + // CoInitializeEx() is not called here to avoid STA/MTA conflicts. 
+ ScopedComPtr<IMMDeviceEnumerator> enumerator; + HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), + NULL, + CLSCTX_INPROC_SERVER, + __uuidof(IMMDeviceEnumerator), + enumerator.ReceiveVoid()); + if (FAILED(hr)) { + NOTREACHED() << "error code: " << hr; + return 0.0; + } + + ScopedComPtr<IMMDevice> endpoint_device; + hr = enumerator->GetDefaultAudioEndpoint(eCapture, + device_role, + endpoint_device.Receive()); + if (FAILED(hr)) { + NOTREACHED() << "error code: " << hr; + return 0.0; + } + + ScopedComPtr<IAudioClient> audio_client; + hr = endpoint_device->Activate(__uuidof(IAudioClient), + CLSCTX_INPROC_SERVER, + NULL, + audio_client.ReceiveVoid()); + if (FAILED(hr)) { + NOTREACHED() << "error code: " << hr; + return 0.0; + } + + base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format; + hr = audio_client->GetMixFormat(&audio_engine_mix_format); + if (FAILED(hr)) { + NOTREACHED() << "error code: " << hr; + return 0.0; + } + + return static_cast<double>(audio_engine_mix_format->nSamplesPerSec); +} + +void WASAPIAudioInputStream::Run() { + ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); + + // Increase the thread priority. + capture_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); + + // Enable MMCSS to ensure that this thread receives prioritized access to + // CPU resources. + DWORD task_index = 0; + HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", + &task_index); + bool mmcss_is_ok = + (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); + if (!mmcss_is_ok) { + // Failed to enable MMCSS on this thread. It is not fatal but can lead + // to reduced QoS at high load. + DWORD err = GetLastError(); + LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; + } + + // Allocate a buffer with a size that enables us to take care of cases like: + // 1) The recorded buffer size is smaller, or does not match exactly with, + // the selected packet size used in each callback. 
+ // 2) The selected buffer size is larger than the recorded buffer size in + // each event. + size_t buffer_frame_index = 0; + size_t capture_buffer_size = std::max( + 2 * endpoint_buffer_size_frames_ * frame_size_, + 2 * packet_size_frames_ * frame_size_); + scoped_array<uint8> capture_buffer(new uint8[capture_buffer_size]); + + LARGE_INTEGER now_count; + bool recording = true; + bool error = false; + HANDLE wait_array[2] = {stop_capture_event_, audio_samples_ready_event_}; + + while (recording && !error) { + HRESULT hr = S_FALSE; + + // Wait for a close-down event or a new capture event. + DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE); + switch (wait_result) { + case WAIT_FAILED: + error = true; + break; + case WAIT_OBJECT_0 + 0: + // |stop_capture_event_| has been set. + recording = false; + break; + case WAIT_OBJECT_0 + 1: + { + // |audio_samples_ready_event_| has been set. + BYTE* data_ptr = NULL; + UINT32 num_frames_to_read = 0; + DWORD flags = 0; + UINT64 device_position = 0; + UINT64 first_audio_frame_timestamp = 0; + + // Retrieve the amount of data in the capture endpoint buffer, + // replace it with silence if required, create callbacks for each + // packet and store non-delivered data for the next event. + hr = audio_capture_client_->GetBuffer(&data_ptr, + &num_frames_to_read, + &flags, + &device_position, + &first_audio_frame_timestamp); + if (FAILED(hr)) { + DLOG(ERROR) << "Failed to get data from the capture buffer"; + continue; + } + + if (num_frames_to_read != 0) { + size_t pos = buffer_frame_index * frame_size_; + size_t num_bytes = num_frames_to_read * frame_size_; + DCHECK_GE(capture_buffer_size, pos + num_bytes); + + if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { + // Clear out the local buffer since silence is reported. + memset(&capture_buffer[pos], 0, num_bytes); + } else { + // Copy captured data from audio engine buffer to local buffer. 
+ memcpy(&capture_buffer[pos], data_ptr, num_bytes); + } + + buffer_frame_index += num_frames_to_read; + } + + hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); + DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; + + // Derive a delay estimate for the captured audio packet. + // The value contains two parts (A+B), where A is the delay of the + // first audio frame in the packet and B is the extra delay + // contained in any stored data. Unit is in audio frames. + QueryPerformanceCounter(&now_count); + double audio_delay_frames = + ((perf_count_to_100ns_units_ * now_count.QuadPart - + first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ + + buffer_frame_index - num_frames_to_read; + + // Deliver captured data to the registered consumer using a packet + // size which was specified at construction. + uint32 delay_frames = static_cast<uint32>(audio_delay_frames + 0.5); + while (buffer_frame_index >= packet_size_frames_) { + uint8* audio_data = + reinterpret_cast<uint8*>(capture_buffer.get()); + + // Deliver data packet and delay estimation to the user. + sink_->OnData(this, + audio_data, + packet_size_bytes_, + delay_frames * frame_size_); + + // Store parts of the recorded data which can't be delivered + // using the current packet size. The stored section will be used + // either in the next while-loop iteration or in the next + // capture event. + memmove(&capture_buffer[0], + &capture_buffer[packet_size_bytes_], + (buffer_frame_index - packet_size_frames_) * frame_size_); + + buffer_frame_index -= packet_size_frames_; + delay_frames -= packet_size_frames_; + } + } + break; + default: + error = true; + break; + } + } + + if (recording && error) { + // TODO(henrika): perhaps it worth improving the cleanup here by e.g. + // stopping the audio client, joining the thread etc.? + NOTREACHED() << "WASAPI capturing failed with error code " + << GetLastError(); + } + + // Disable MMCSS. 
+ if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { + PLOG(WARNING) << "Failed to disable MMCSS"; + } +} + +void WASAPIAudioInputStream::HandleError(HRESULT err) { + NOTREACHED() << "Error code: " << err; + if (sink_) + sink_->OnError(this, static_cast<int>(err)); +} + +HRESULT WASAPIAudioInputStream::SetCaptureDevice(ERole device_role) { + ScopedComPtr<IMMDeviceEnumerator> enumerator; + HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), + NULL, + CLSCTX_INPROC_SERVER, + __uuidof(IMMDeviceEnumerator), + enumerator.ReceiveVoid()); + if (SUCCEEDED(hr)) { + // Retrieve the default capture audio endpoint for the specified role. + // Note that, in Windows Vista, the MMDevice API supports device roles + // but the system-supplied user interface programs do not. + hr = enumerator->GetDefaultAudioEndpoint(eCapture, + device_role, + endpoint_device_.Receive()); + + // Verify that the audio endpoint device is active. That is, the audio + // adapter that connects to the endpoint device is present and enabled. + DWORD state = DEVICE_STATE_DISABLED; + hr = endpoint_device_->GetState(&state); + if (SUCCEEDED(hr)) { + if (!(state & DEVICE_STATE_ACTIVE)) { + DLOG(ERROR) << "Selected capture device is not active."; + hr = E_ACCESSDENIED; + } + } + } + + return hr; +} + +HRESULT WASAPIAudioInputStream::ActivateCaptureDevice() { + // Creates and activates an IAudioClient COM object given the selected + // capture endpoint device. + HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), + CLSCTX_INPROC_SERVER, + NULL, + audio_client_.ReceiveVoid()); + return hr; +} + +HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() { + // Retrieve the stream format that the audio engine uses for its internal + // processing/mixing of shared-mode streams. 
+ return audio_client_->GetMixFormat(&audio_engine_mix_format_); +} + +bool WASAPIAudioInputStream::DesiredFormatIsSupported() { + // In shared mode, the audio engine always supports the mix format, + // which is stored in the |audio_engine_mix_format_| member. In addition, + // the audio engine *might* support similar formats that have the same + // sample rate and number of channels as the mix format but differ in + // the representation of audio sample values. + base::win::ScopedCoMem<WAVEFORMATEX> closest_match; + HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, + &format_, + &closest_match); + DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " + << "but a closest match exists."; + return (hr == S_OK); +} + +HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { + // Initialize the audio stream between the client and the device. + // We connect indirectly through the audio engine by using shared mode + // and WASAPI is initialized in an event driven mode. + // Note that, |hnsBufferDuration| is set of 0, which ensures that the + // buffer is never smaller than the minimum buffer size needed to ensure + // that glitches do not occur between the periodic processing passes. + // This setting should lead to lowest possible latency. + HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, + AUDCLNT_STREAMFLAGS_EVENTCALLBACK | + AUDCLNT_STREAMFLAGS_NOPERSIST, + 0, // hnsBufferDuration + 0, + &format_, + NULL); + if (FAILED(hr)) + return hr; + + // Retrieve the length of the endpoint buffer shared between the client + // and the audio engine. The buffer length determines the maximum amount + // of capture data that the audio engine can read from the endpoint buffer + // during a single processing pass. + // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. 
+ hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); + if (FAILED(hr)) + return hr; + DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ + << " [frames]"; + +#ifndef NDEBUG + // The period between processing passes by the audio engine is fixed for a + // particular audio endpoint device and represents the smallest processing + // quantum for the audio engine. This period plus the stream latency between + // the buffer and endpoint device represents the minimum possible latency + // that an audio application can achieve. + // TODO(henrika): possibly remove this section when all parts are ready. + REFERENCE_TIME device_period_shared_mode = 0; + REFERENCE_TIME device_period_exclusive_mode = 0; + HRESULT hr_dbg = audio_client_->GetDevicePeriod( + &device_period_shared_mode, &device_period_exclusive_mode); + if (SUCCEEDED(hr_dbg)) { + DVLOG(1) << "device period: " + << static_cast<double>(device_period_shared_mode / 10000.0) + << " [ms]"; + } + + REFERENCE_TIME latency = 0; + hr_dbg = audio_client_->GetStreamLatency(&latency); + if (SUCCEEDED(hr_dbg)) { + DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) + << " [ms]"; + } +#endif + + // Set the event handle that the audio engine will signal each time + // a buffer becomes ready to be processed by the client. + hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get()); + if (FAILED(hr)) + return hr; + + // Get access to the IAudioCaptureClient interface. This interface + // enables us to read input data from the capture endpoint buffer. + hr = audio_client_->GetService(__uuidof(IAudioCaptureClient), + audio_capture_client_.ReceiveVoid()); + return hr; +} diff --git a/media/audio/win/audio_low_latency_input_win.h b/media/audio/win/audio_low_latency_input_win.h new file mode 100644 index 0000000..b338dc2 --- /dev/null +++ b/media/audio/win/audio_low_latency_input_win.h @@ -0,0 +1,182 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Implementation of AudioInputStream for Windows using Windows Core Audio +// WASAPI for low latency capturing. +// +// Overview of operation: +// +// - An object of WASAPIAudioInputStream is created by the AudioManager +// factory. +// - Next some thread will call Open(), at that point the underlying +// Core Audio APIs are utilized to create two WASAPI interfaces called +// IAudioClient and IAudioCaptureClient. +// - Then some thread will call Start(sink). +// A thread called "wasapi_capture_thread" is started and this thread listens +// on an event signal which is set periodically by the audio engine for +// each recorded data packet. As a result, data samples will be provided +// to the registered sink. +// - At some point, a thread will call Stop(), which stops and joins the +// capture thread and at the same time stops audio streaming. +// - The same thread that called stop will call Close() where we cleanup +// and notify the audio manager, which likely will destroy this object. +// +// Implementation notes: +// +// - The minimum supported client is Windows Vista. +// - This implementation is single-threaded, hence: +// o Construction and destruction must take place from the same thread. +// o It is recommended to call all APIs from the same thread as well. +// - It is recommended to first acquire the native sample rate of the default +// input device and then use the same rate when creating this object. Use +// WASAPIAudioInputStream::HardwareSampleRate() to retrieve the sample rate. +// - Calling Close() also leads to self destruction. +// +// Core Audio API details: +// +// - CoInitializeEx() is called on the creating thread and on the internal +// capture thread. Each thread's concurrency model and apartment is set +// to multi-threaded (MTA). CHECK() is called to ensure that we crash if +// CoInitializeEx(MTA) fails. 
+// - Utilized MMDevice interfaces: +// o IMMDeviceEnumerator +// o IMMDevice +// - Utilized WASAPI interfaces: +// o IAudioClient +// o IAudioCaptureClient +// - The stream is initialized in shared mode and the processing of the +// audio buffer is event driven. +// - The Multimedia Class Scheduler service (MMCSS) is utilized to boost +// the priority of the capture thread. +// +#ifndef MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_INPUT_WIN_H_ +#define MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_INPUT_WIN_H_ + +#include <Audioclient.h> +#include <MMDeviceAPI.h> + +#include "base/compiler_specific.h" +#include "base/threading/platform_thread.h" +#include "base/threading/simple_thread.h" +#include "base/win/scoped_co_mem.h" +#include "base/win/scoped_com_initializer.h" +#include "base/win/scoped_comptr.h" +#include "base/win/scoped_handle.h" +#include "media/audio/audio_io.h" +#include "media/audio/audio_parameters.h" + +class AudioManagerWin; + +// AudioInputStream implementation using Windows Core Audio APIs. +class WASAPIAudioInputStream + : public AudioInputStream, + public base::DelegateSimpleThread::Delegate { + public: + // The ctor takes all the usual parameters, plus |manager| which is the + // the audio manager who is creating this object. + WASAPIAudioInputStream(AudioManagerWin* manager, + const AudioParameters& params, + ERole device_role); + // The dtor is typically called by the AudioManager only and it is usually + // triggered by calling AudioInputStream::Close(). + virtual ~WASAPIAudioInputStream(); + + // Implementation of AudioInputStream. + virtual bool Open() OVERRIDE; + virtual void Start(AudioInputCallback* callback) OVERRIDE; + virtual void Stop() OVERRIDE; + virtual void Close() OVERRIDE; + + // Retrieves the stream format that the audio engine uses for its internal + // processing/mixing of shared-mode streams. 
+ static double HardwareSampleRate(ERole device_role); + + bool started() const { return started_; } + + private: + // DelegateSimpleThread::Delegate implementation. + virtual void Run() OVERRIDE; + + // Issues the OnError() callback to the |sink_|. + void HandleError(HRESULT err); + + // The Open() method is divided into these sub methods. + HRESULT SetCaptureDevice(ERole device_role); + HRESULT ActivateCaptureDevice(); + HRESULT GetAudioEngineStreamFormat(); + bool DesiredFormatIsSupported(); + HRESULT InitializeAudioEngine(); + + // Initializes the COM library for use by the calling thread and set the + // thread's concurrency model to multi-threaded. + base::win::ScopedCOMInitializer com_init_; + + // Our creator, the audio manager needs to be notified when we close. + AudioManagerWin* manager_; + + // Capturing is driven by this thread (which has no message loop). + // All OnData() callbacks will be called from this thread. + base::DelegateSimpleThread* capture_thread_; + + // Contains the desired audio format which is set up at construction. + WAVEFORMATEX format_; + + // Copy of the audio format which we know the audio engine supports. + // It is recommended to ensure that the sample rate in |format_| is identical + // to the sample rate in |audio_engine_mix_format_|. + base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format_; + + bool opened_; + bool started_; + + // Size in bytes of each audio frame (4 bytes for 16-bit stereo PCM) + size_t frame_size_; + + // Size in audio frames of each audio packet where an audio packet + // is defined as the block of data which the user received in each + // OnData() callback. + size_t packet_size_frames_; + + // Size in bytes of each audio packet. + size_t packet_size_bytes_; + + // Length of the audio endpoint buffer. + size_t endpoint_buffer_size_frames_; + + // Defines the role that the system has assigned to an audio endpoint device. 
+ ERole device_role_; + + // Conversion factor used in delay-estimation calculations. + // Converts a raw performance counter value to 100-nanosecond unit. + double perf_count_to_100ns_units_; + + // Conversion factor used in delay-estimation calculations. + // Converts from milliseconds to audio frames. + double ms_to_frame_count_; + + // Pointer to the object that will receive the recorded audio samples. + AudioInputCallback* sink_; + + // An IMMDevice interface which represents an audio endpoint device. + base::win::ScopedComPtr<IMMDevice> endpoint_device_; + + // An IAudioClient interface which enables a client to create and initialize + // an audio stream between an audio application and the audio engine. + base::win::ScopedComPtr<IAudioClient> audio_client_; + + // The IAudioCaptureClient interface enables a client to read input data + // from a capture endpoint buffer. + base::win::ScopedComPtr<IAudioCaptureClient> audio_capture_client_; + + // The audio engine will signal this event each time a buffer has been + // recorded. + base::win::ScopedHandle audio_samples_ready_event_; + + // This event will be signaled when capturing shall stop. + base::win::ScopedHandle stop_capture_event_; + + DISALLOW_COPY_AND_ASSIGN(WASAPIAudioInputStream); +}; + +#endif // MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_INPUT_WIN_H_ diff --git a/media/audio/win/audio_low_latency_input_win_unittest.cc b/media/audio/win/audio_low_latency_input_win_unittest.cc new file mode 100644 index 0000000..e6da1ce --- /dev/null +++ b/media/audio/win/audio_low_latency_input_win_unittest.cc @@ -0,0 +1,368 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include <windows.h> +#include <mmsystem.h> + +#include "base/basictypes.h" +#include "base/environment.h" +#include "base/memory/scoped_ptr.h" +#include "base/test/test_timeouts.h" +#include "base/win/scoped_com_initializer.h" +#include "media/audio/audio_io.h" +#include "media/audio/audio_manager.h" +#include "media/audio/win/audio_low_latency_input_win.h" +#include "media/base/seekable_buffer.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +using base::win::ScopedCOMInitializer; +using ::testing::AnyNumber; +using ::testing::Between; +using ::testing::Gt; +using ::testing::NotNull; + +class MockAudioInputCallback : public AudioInputStream::AudioInputCallback { + public: + MOCK_METHOD4(OnData, void(AudioInputStream* stream, + const uint8* src, uint32 size, + uint32 hardware_delay_bytes)); + MOCK_METHOD1(OnClose, void(AudioInputStream* stream)); + MOCK_METHOD2(OnError, void(AudioInputStream* stream, int code)); +}; + +// This audio sink implementation should be used for manual tests only since +// the recorded data is stored on a raw binary data file. +class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback { + public: + // Allocate space for ~10 seconds of data @ 48kHz in stereo: + // 2 bytes per sample, 2 channels, 10ms @ 48kHz, 10 seconds <=> 1920000 bytes. + static const size_t kMaxBufferSize = 2 * 2 * 480 * 100 * 10; + + explicit WriteToFileAudioSink(const char* file_name) + : buffer_(0, kMaxBufferSize), + file_(fopen(file_name, "wb")), + bytes_to_write_(0) { + } + + virtual ~WriteToFileAudioSink() { + size_t bytes_written = 0; + while (bytes_written < bytes_to_write_) { + const uint8* chunk; + size_t chunk_size; + + // Stop writing if no more data is available. + if (!buffer_.GetCurrentChunk(&chunk, &chunk_size)) + break; + + // Write recorded data chunk to the file and prepare for next chunk. 
+ fwrite(chunk, 1, chunk_size, file_); + buffer_.Seek(chunk_size); + bytes_written += chunk_size; + } + fclose(file_); + } + + // AudioInputStream::AudioInputCallback implementation. + virtual void OnData(AudioInputStream* stream, + const uint8* src, + uint32 size, + uint32 hardware_delay_bytes) { + // Store data data in a temporary buffer to avoid making blocking + // fwrite() calls in the audio callback. The complete buffer will be + // written to file in the destructor. + if (buffer_.Append(src, size)) { + bytes_to_write_ += size; + } + } + + virtual void OnClose(AudioInputStream* stream) {} + virtual void OnError(AudioInputStream* stream, int code) {} + + private: + media::SeekableBuffer buffer_; + FILE* file_; + size_t bytes_to_write_; +}; + +// Convenience method which ensures that we are not running on the build +// bots and that at least one valid input device can be found. +static bool CanRunAudioTests() { + scoped_ptr<base::Environment> env(base::Environment::Create()); + if (env->HasVar("CHROME_HEADLESS")) + return false; + AudioManager* audio_man = AudioManager::GetAudioManager(); + if (NULL == audio_man) + return false; + // TODO(henrika): note that we use Wave today to query the number of + // existing input devices. + return audio_man->HasAudioInputDevices(); +} + +// Convenience method which creates a default AudioInputStream object but +// also allows the user to modify the default settings. +class AudioInputStreamWrapper { + public: + AudioInputStreamWrapper() + : com_init_(ScopedCOMInitializer::kMTA), + audio_man_(AudioManager::GetAudioManager()), + format_(AudioParameters::AUDIO_PCM_LOW_LATENCY), + channel_layout_(CHANNEL_LAYOUT_STEREO), + bits_per_sample_(16) { + // Use native/mixing sample rate and 10ms frame size as default. 
+    sample_rate_ = static_cast<int>(
+        WASAPIAudioInputStream::HardwareSampleRate(eConsole));
+    samples_per_packet_ = sample_rate_ / 100;
+  }
+
+  ~AudioInputStreamWrapper() {}
+
+  // Creates AudioInputStream object using default parameters.
+  AudioInputStream* Create() {
+    return CreateInputStream();
+  }
+
+  // Creates AudioInputStream object using non-default parameters where the
+  // frame size is modified.
+  AudioInputStream* Create(int samples_per_packet) {
+    samples_per_packet_ = samples_per_packet;
+    return CreateInputStream();
+  }
+
+  AudioParameters::Format format() const { return format_; }
+  int channels() const {
+    return ChannelLayoutToChannelCount(channel_layout_);
+  }
+  int bits_per_sample() const { return bits_per_sample_; }
+  int sample_rate() const { return sample_rate_; }
+  int samples_per_packet() const { return samples_per_packet_; }
+
+ private:
+  AudioInputStream* CreateInputStream() {
+    AudioInputStream* ais = audio_man_->MakeAudioInputStream(
+        AudioParameters(format_, channel_layout_, sample_rate_,
+                        bits_per_sample_, samples_per_packet_));
+    EXPECT_TRUE(ais);
+    return ais;
+  }
+
+  ScopedCOMInitializer com_init_;
+  AudioManager* audio_man_;
+  AudioParameters::Format format_;
+  ChannelLayout channel_layout_;
+  int bits_per_sample_;
+  int sample_rate_;
+  int samples_per_packet_;
+};
+
+// Convenience method which creates a default AudioInputStream object.
+static AudioInputStream* CreateDefaultAudioInputStream() {
+  AudioInputStreamWrapper aisw;
+  AudioInputStream* ais = aisw.Create();
+  return ais;
+}
+
+// Verify that we can retrieve the current hardware/mixing sample rate
+// for all supported device roles. The ERole enumeration defines constants
+// that indicate the role that the system/user has assigned to an audio
+// endpoint device.
+// TODO(henrika): modify this test when we support full device enumeration.
+TEST(WinAudioInputTest, WASAPIAudioInputStreamHardwareSampleRate) { + if (!CanRunAudioTests()) + return; + + ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); + + // Default device intended for games, system notification sounds, + // and voice commands. + int fs = static_cast<int>( + WASAPIAudioInputStream::HardwareSampleRate(eConsole)); + EXPECT_GE(fs, 0); + + // Default communication device intended for e.g. VoIP communication. + fs = static_cast<int>( + WASAPIAudioInputStream::HardwareSampleRate(eCommunications)); + EXPECT_GE(fs, 0); + + // Multimedia device for music, movies and live music recording. + fs = static_cast<int>( + WASAPIAudioInputStream::HardwareSampleRate(eMultimedia)); + EXPECT_GE(fs, 0); +} + +// Test Create(), Close() calling sequence. +TEST(WinAudioInputTest, WASAPIAudioInputStreamCreateAndClose) { + if (!CanRunAudioTests()) + return; + AudioInputStream* ais = CreateDefaultAudioInputStream(); + ais->Close(); +} + +// Test Open(), Close() calling sequence. +TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenAndClose) { + if (!CanRunAudioTests()) + return; + AudioInputStream* ais = CreateDefaultAudioInputStream(); + EXPECT_TRUE(ais->Open()); + ais->Close(); +} + +// Test Open(), Start(), Close() calling sequence. +TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) { + if (!CanRunAudioTests()) + return; + AudioInputStream* ais = CreateDefaultAudioInputStream(); + EXPECT_TRUE(ais->Open()); + MockAudioInputCallback sink; + ais->Start(&sink); + EXPECT_CALL(sink, OnClose(ais)) + .Times(1); + ais->Close(); +} + +// Test Open(), Start(), Stop(), Close() calling sequence. 
+TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
+  if (!CanRunAudioTests())
+    return;
+  AudioInputStream* ais = CreateDefaultAudioInputStream();
+  EXPECT_TRUE(ais->Open());
+  MockAudioInputCallback sink;
+  ais->Start(&sink);
+  ais->Stop();
+  EXPECT_CALL(sink, OnClose(ais))
+      .Times(1);
+  ais->Close();
+}
+
+// Test some additional calling sequences.
+TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
+  if (!CanRunAudioTests())
+    return;
+  AudioInputStream* ais = CreateDefaultAudioInputStream();
+  WASAPIAudioInputStream* wais = static_cast<WASAPIAudioInputStream*>(ais);
+
+  // Open(), Open() should fail the second time.
+  EXPECT_TRUE(ais->Open());
+  EXPECT_FALSE(ais->Open());
+
+  MockAudioInputCallback sink;
+
+  // Start(), Start() is a valid calling sequence (second call does nothing).
+  ais->Start(&sink);
+  EXPECT_TRUE(wais->started());
+  ais->Start(&sink);
+  EXPECT_TRUE(wais->started());
+
+  // Stop(), Stop() is a valid calling sequence (second call does nothing).
+  ais->Stop();
+  EXPECT_FALSE(wais->started());
+  ais->Stop();
+  EXPECT_FALSE(wais->started());
+
+  EXPECT_CALL(sink, OnClose(ais))
+      .Times(1);
+  ais->Close();
+}
+
+TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
+  if (!CanRunAudioTests())
+    return;
+
+  // 10 ms packet size.
+
+  // Create default WASAPI input stream which records in stereo using
+  // the shared mixing rate. The default buffer size is 10ms.
+  AudioInputStreamWrapper aisw;
+  AudioInputStream* ais = aisw.Create();
+  EXPECT_TRUE(ais->Open());
+
+  MockAudioInputCallback sink;
+
+  // Derive the expected size in bytes of each recorded packet.
+  uint32 bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
+      (aisw.bits_per_sample() / 8);
+
+  // We use 10ms packets and will run the test for ~100ms. Given that the
+  // startup sequence takes some time, it is reasonable to expect 5-10
All should contain valid packets of + // the same size and a valid delay estimate. + EXPECT_CALL(sink, OnData( + ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet))) + .Times(Between(5, 10)); + + ais->Start(&sink); + base::PlatformThread::Sleep(TestTimeouts::tiny_timeout_ms()); + ais->Stop(); + + // Store current packet size (to be used in the subsequent tests). + int samples_per_packet_10ms = aisw.samples_per_packet(); + + EXPECT_CALL(sink, OnClose(ais)) + .Times(1); + ais->Close(); + + // 20 ms packet size. + + ais = aisw.Create(2 * samples_per_packet_10ms); + EXPECT_TRUE(ais->Open()); + bytes_per_packet = aisw.channels() * aisw.samples_per_packet() * + (aisw.bits_per_sample() / 8); + + EXPECT_CALL(sink, OnData( + ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet))) + .Times(Between(5, 10)); + ais->Start(&sink); + base::PlatformThread::Sleep(2 * TestTimeouts::tiny_timeout_ms()); + ais->Stop(); + + EXPECT_CALL(sink, OnClose(ais)) + .Times(1); + ais->Close(); + + // 5 ms packet size. + + ais = aisw.Create(samples_per_packet_10ms / 2); + EXPECT_TRUE(ais->Open()); + bytes_per_packet = aisw.channels() * aisw.samples_per_packet() * + (aisw.bits_per_sample() / 8); + + EXPECT_CALL(sink, OnData( + ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet))) + .Times(Between(2 * 5, 2 * 10)); + ais->Start(&sink); + base::PlatformThread::Sleep(TestTimeouts::tiny_timeout_ms()); + ais->Stop(); + + EXPECT_CALL(sink, OnClose(ais)) + .Times(1); + ais->Close(); +} + +// This test is intended for manual tests and should only be enabled +// when it is required to store the captured data on a local file. +// By default, GTest will print out YOU HAVE 1 DISABLED TEST. +// To include disabled tests in test execution, just invoke the test program +// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS +// environment variable to a value greater than 0. 
+TEST(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) { + if (!CanRunAudioTests()) + return; + + const char* file_name = "out_stereo_10sec.pcm"; + + AudioInputStreamWrapper aisw; + AudioInputStream* ais = aisw.Create(); + EXPECT_TRUE(ais->Open()); + + fprintf(stderr, " File name : %s\n", file_name); + fprintf(stderr, " Sample rate: %d\n", aisw.sample_rate()); + WriteToFileAudioSink file_sink(file_name); + fprintf(stderr, " >> Speak into the mic while recording...\n"); + ais->Start(&file_sink); + base::PlatformThread::Sleep(TestTimeouts::action_timeout_ms()); + ais->Stop(); + fprintf(stderr, " >> Recording has stopped.\n"); + ais->Close(); +} diff --git a/media/audio/win/audio_manager_win.cc b/media/audio/win/audio_manager_win.cc index c361b87d..e9c1d20 100644 --- a/media/audio/win/audio_manager_win.cc +++ b/media/audio/win/audio_manager_win.cc @@ -21,6 +21,7 @@ #include "base/win/windows_version.h" #include "media/audio/fake_audio_input_stream.h" #include "media/audio/fake_audio_output_stream.h" +#include "media/audio/win/audio_low_latency_input_win.h" #include "media/audio/win/audio_manager_win.h" #include "media/audio/win/wavein_input_win.h" #include "media/audio/win/waveout_output_win.h" @@ -148,6 +149,17 @@ AudioInputStream* AudioManagerWin::MakeAudioInputStream( } else if (params.format == AudioParameters::AUDIO_PCM_LINEAR) { return new PCMWaveInAudioInputStream(this, params, kNumInputBuffers, WAVE_MAPPER); + } else if (params.format == AudioParameters::AUDIO_PCM_LOW_LATENCY) { + if (base::win::GetVersion() <= base::win::VERSION_XP) { + // Fall back to Windows Wave implementation on Windows XP or lower. + DLOG(INFO) << "Using WaveIn since WASAPI requires at least Vista."; + return new PCMWaveInAudioInputStream(this, params, kNumInputBuffers, + WAVE_MAPPER); + } else { + // TODO(henrika): improve possibility to specify audio endpoint. + // Use the default device (same as for Wave) for now to be compatible. 
+ return new WASAPIAudioInputStream(this, params, eConsole); + } } return NULL; } @@ -158,7 +170,7 @@ void AudioManagerWin::ReleaseOutputStream(PCMWaveOutAudioOutputStream* stream) { delete stream; } -void AudioManagerWin::ReleaseInputStream(PCMWaveInAudioInputStream* stream) { +void AudioManagerWin::ReleaseInputStream(AudioInputStream* stream) { delete stream; } diff --git a/media/audio/win/audio_manager_win.h b/media/audio/win/audio_manager_win.h index 6db13c9..949e765 100644 --- a/media/audio/win/audio_manager_win.h +++ b/media/audio/win/audio_manager_win.h @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#ifndef MEDIA_AUDIO_AUDIO_MANAGER_WIN_H_ -#define MEDIA_AUDIO_AUDIO_MANAGER_WIN_H_ +#ifndef MEDIA_AUDIO_WIN_AUDIO_MANAGER_WIN_H_ +#define MEDIA_AUDIO_WIN_AUDIO_MANAGER_WIN_H_ #include <windows.h> @@ -40,7 +40,7 @@ class AudioManagerWin : public AudioManagerBase { void ReleaseOutputStream(PCMWaveOutAudioOutputStream* stream); // Called internally by the audio stream when it has been closed. - void ReleaseInputStream(PCMWaveInAudioInputStream* stream); + void ReleaseInputStream(AudioInputStream* stream); private: virtual ~AudioManagerWin(); @@ -51,4 +51,4 @@ class AudioManagerWin : public AudioManagerBase { DISALLOW_COPY_AND_ASSIGN(AudioManagerWin); }; -#endif // MEDIA_AUDIO_AUDIO_MANAGER_WIN_H_ +#endif // MEDIA_AUDIO_WIN_AUDIO_MANAGER_WIN_H_ diff --git a/media/audio/win/avrt_wrapper_win.cc b/media/audio/win/avrt_wrapper_win.cc new file mode 100644 index 0000000..c9f1599 --- /dev/null +++ b/media/audio/win/avrt_wrapper_win.cc @@ -0,0 +1,64 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "media/audio/win/avrt_wrapper_win.h" + +#include "base/logging.h" + +namespace avrt { + +// Function pointers +typedef BOOL (WINAPI *AvRevertMmThreadCharacteristicsFn)(HANDLE); +typedef HANDLE (WINAPI *AvSetMmThreadCharacteristicsFn)(LPCWSTR, LPDWORD); +typedef BOOL (WINAPI *AvSetMmThreadPriorityFn)(HANDLE, AVRT_PRIORITY); + +HMODULE g_avrt = NULL; +AvRevertMmThreadCharacteristicsFn g_revert_mm_thread_characteristics = NULL; +AvSetMmThreadCharacteristicsFn g_set_mm_thread_characteristics = NULL; +AvSetMmThreadPriorityFn g_set_mm_thread_priority = NULL; + +bool Initialize() { + if (!g_set_mm_thread_priority) { + // The avrt.dll is available on Windows Vista and later. + wchar_t path[MAX_PATH] = {0}; + ExpandEnvironmentStrings(L"%WINDIR%\\system32\\avrt.dll", path, + arraysize(path)); + g_avrt = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH); + if (!g_avrt) + return false; + + g_revert_mm_thread_characteristics = + reinterpret_cast<AvRevertMmThreadCharacteristicsFn>( + GetProcAddress(g_avrt, "AvRevertMmThreadCharacteristics")); + g_set_mm_thread_characteristics = + reinterpret_cast<AvSetMmThreadCharacteristicsFn>( + GetProcAddress(g_avrt, "AvSetMmThreadCharacteristicsW")); + g_set_mm_thread_priority = reinterpret_cast<AvSetMmThreadPriorityFn>( + GetProcAddress(g_avrt, "AvSetMmThreadPriority")); + } + + return (g_avrt && g_revert_mm_thread_characteristics && + g_set_mm_thread_characteristics && g_set_mm_thread_priority); +} + +bool AvRevertMmThreadCharacteristics(HANDLE avrt_handle) { + DCHECK(g_revert_mm_thread_characteristics); + return (g_revert_mm_thread_characteristics && + g_revert_mm_thread_characteristics(avrt_handle)); +} + +HANDLE AvSetMmThreadCharacteristics(const wchar_t* task_name, + DWORD* task_index) { + DCHECK(g_set_mm_thread_characteristics); + return (g_set_mm_thread_characteristics ? 
+ g_set_mm_thread_characteristics(task_name, task_index) : NULL); +} + +bool AvSetMmThreadPriority(HANDLE avrt_handle, AVRT_PRIORITY priority) { + DCHECK(g_set_mm_thread_priority); + return (g_set_mm_thread_priority && + g_set_mm_thread_priority(avrt_handle, priority)); +} + +} // namespace avrt diff --git a/media/audio/win/avrt_wrapper_win.h b/media/audio/win/avrt_wrapper_win.h new file mode 100644 index 0000000..8127b6b --- /dev/null +++ b/media/audio/win/avrt_wrapper_win.h @@ -0,0 +1,39 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// The avrt namespace encapsulates the details needed to support MMCSS. +// +// The Multimedia Class Scheduler service (MMCSS) enables multimedia +// applications to ensure that their time-sensitive processing receives +// prioritized access to CPU resources. This service enables multimedia +// applications to utilize as much of the CPU as possible without denying +// CPU resources to lower-priority applications. +// MMCSS requires Windows Vista or higher and that the Avrt DLL is loaded. +// +// TODO(henrika): refactor and merge into existing thread implementation +// for Windows to ensure that MMCSS can be enabled for all threads. +// +#ifndef MEDIA_AUDIO_WIN_AVRT_WRAPPER_WIN_H_ +#define MEDIA_AUDIO_WIN_AVRT_WRAPPER_WIN_H_ + +#include <windows.h> +#include <avrt.h> + +#include "base/basictypes.h" + +namespace avrt { + +// Loads the Avrt.dll which is available on Windows Vista and later. +bool Initialize(); + +// Function wrappers for the underlying MMCSS functions. 
+bool AvRevertMmThreadCharacteristics(HANDLE avrt_handle); +HANDLE AvSetMmThreadCharacteristics(const wchar_t* task_name, + DWORD* task_index); +bool AvSetMmThreadPriority(HANDLE avrt_handle, AVRT_PRIORITY priority); + +} // namespace avrt + +#endif // MEDIA_AUDIO_WIN_AVRT_WRAPPER_WIN_H_ + diff --git a/media/media.gyp b/media/media.gyp index 5fcebf7..0f62000 100644 --- a/media/media.gyp +++ b/media/media.gyp @@ -70,6 +70,8 @@ 'audio/mac/audio_input_mac.h', 'audio/mac/audio_low_latency_input_mac.cc', 'audio/mac/audio_low_latency_input_mac.h', + 'audio/win/audio_low_latency_input_win.cc', + 'audio/win/audio_low_latency_input_win.h', 'audio/mac/audio_low_latency_output_mac.cc', 'audio/mac/audio_low_latency_output_mac.h', 'audio/mac/audio_manager_mac.cc', @@ -80,6 +82,8 @@ 'audio/simple_sources.h', 'audio/win/audio_manager_win.h', 'audio/win/audio_manager_win.cc', + 'audio/win/avrt_wrapper_win.h', + 'audio/win/avrt_wrapper_win.cc', 'audio/win/wavein_input_win.cc', 'audio/win/wavein_input_win.h', 'audio/win/waveout_output_win.cc', @@ -559,6 +563,7 @@ 'audio/mac/audio_low_latency_input_mac_unittest.cc', 'audio/mac/audio_output_mac_unittest.cc', 'audio/simple_sources_unittest.cc', + 'audio/win/audio_low_latency_input_win_unittest.cc', 'audio/win/audio_output_win_unittest.cc', 'base/clock_unittest.cc', 'base/composite_filter_unittest.cc', |