authorskobes@google.com <skobes@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2014-02-07 22:08:05 +0000
committerskobes@google.com <skobes@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2014-02-07 22:08:05 +0000
commitab2066f6062f3acec61bce9b2cb52910549d051d (patch)
treea7120570dff5c48844cb0b312cf362f5ab41666e /media/audio/mac
parent0cfa96c53fc8f28c5c60c950fb278db89a05d9ad (diff)
Revert 249790 "Remove the unified IO code on the browser."
http://build.chromium.org/p/chromium.chromiumos/builders/ChromiumOS%20%28amd64%29/builds/14117

chromeos-chrome-34.0.1829.0_alpha-r1: ../../../../../../../home/chrome-bot/chrome_root/src/media/audio/linux/audio_manager_linux.cc: In function 'media::AudioManager* media::CreateAudioManager(media::AudioLogFactory*)':
chromeos-chrome-34.0.1829.0_alpha-r1: ../../../../../../../home/chrome-bot/chrome_root/src/media/audio/linux/audio_manager_linux.cc:33:50: error: cannot allocate an object of abstract type 'media::AudioManagerCras'
chromeos-chrome-34.0.1829.0_alpha-r1: return new AudioManagerCras(audio_log_factory);
chromeos-chrome-34.0.1829.0_alpha-r1: ^

> Remove the unified IO code on the browser.
>
> Unified IO is not used any more and it should be removed.
>
> BUG=337096
> TEST=bots, and nothing breaks.
>
> Review URL: https://codereview.chromium.org/153623004

TBR=xians@chromium.org

Review URL: https://codereview.chromium.org/136233005

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@249811 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media/audio/mac')
-rw-r--r--  media/audio/mac/aggregate_device_manager.cc  | 371
-rw-r--r--  media/audio/mac/aggregate_device_manager.h   |  58
-rw-r--r--  media/audio/mac/audio_auhal_mac_unittest.cc  |   2
-rw-r--r--  media/audio/mac/audio_manager_mac.cc         |  78
-rw-r--r--  media/audio/mac/audio_manager_mac.h          |   8
-rw-r--r--  media/audio/mac/audio_synchronized_mac.cc    | 976
-rw-r--r--  media/audio/mac/audio_synchronized_mac.h     | 216
-rw-r--r--  media/audio/mac/audio_unified_mac.cc         | 397
-rw-r--r--  media/audio/mac/audio_unified_mac.h          | 100
9 files changed, 2198 insertions(+), 8 deletions(-)
diff --git a/media/audio/mac/aggregate_device_manager.cc b/media/audio/mac/aggregate_device_manager.cc
new file mode 100644
index 0000000..c7f3233
--- /dev/null
+++ b/media/audio/mac/aggregate_device_manager.cc
@@ -0,0 +1,371 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/aggregate_device_manager.h"
+
+#include <CoreAudio/AudioHardware.h>
+#include <string>
+
+#include "base/mac/mac_logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/mac/audio_manager_mac.h"
+
+using base::ScopedCFTypeRef;
+
+namespace media {
+
+AggregateDeviceManager::AggregateDeviceManager()
+ : plugin_id_(kAudioObjectUnknown),
+ input_device_(kAudioDeviceUnknown),
+ output_device_(kAudioDeviceUnknown),
+ aggregate_device_(kAudioObjectUnknown) {
+}
+
+AggregateDeviceManager::~AggregateDeviceManager() {
+ DestroyAggregateDevice();
+}
+
+AudioDeviceID AggregateDeviceManager::GetDefaultAggregateDevice() {
+ AudioDeviceID current_input_device;
+ AudioDeviceID current_output_device;
+ AudioManagerMac::GetDefaultInputDevice(&current_input_device);
+ AudioManagerMac::GetDefaultOutputDevice(&current_output_device);
+
+ if (AudioManagerMac::HardwareSampleRateForDevice(current_input_device) !=
+ AudioManagerMac::HardwareSampleRateForDevice(current_output_device)) {
+ // TODO(crogers): with some extra work we can make aggregate devices work
+ // if the clock domain is the same but the sample-rates differ.
+ // For now we fall back to the synchronized path.
+ return kAudioDeviceUnknown;
+ }
+
+ // Use a lazily created aggregate device if it's already available
+ // and still appropriate.
+ if (aggregate_device_ != kAudioObjectUnknown) {
+ // TODO(crogers): handle default device changes for synchronized I/O.
+ // For now, we check to make sure the default devices haven't changed
+ // since we lazily created the aggregate device.
+ if (current_input_device == input_device_ &&
+ current_output_device == output_device_)
+ return aggregate_device_;
+
+ // For now, once lazily created don't attempt to create another
+ // aggregate device.
+ return kAudioDeviceUnknown;
+ }
+
+ input_device_ = current_input_device;
+ output_device_ = current_output_device;
+
+ // Only create an aggregate device if the clock domains match.
+ UInt32 input_clockdomain = GetClockDomain(input_device_);
+ UInt32 output_clockdomain = GetClockDomain(output_device_);
+ DVLOG(1) << "input_clockdomain: " << input_clockdomain;
+ DVLOG(1) << "output_clockdomain: " << output_clockdomain;
+
+ if (input_clockdomain == 0 || input_clockdomain != output_clockdomain)
+ return kAudioDeviceUnknown;
+
+ OSStatus result = CreateAggregateDevice(
+ input_device_,
+ output_device_,
+ &aggregate_device_);
+ if (result != noErr)
+ DestroyAggregateDevice();
+
+ return aggregate_device_;
+}
+
+CFStringRef AggregateDeviceManager::GetDeviceUID(AudioDeviceID id) {
+ static const AudioObjectPropertyAddress kDeviceUIDAddress = {
+ kAudioDevicePropertyDeviceUID,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ // As stated in the CoreAudio header (AudioHardwareBase.h),
+ // the caller is responsible for releasing the device_UID.
+ CFStringRef device_UID;
+ UInt32 size = sizeof(device_UID);
+ OSStatus result = AudioObjectGetPropertyData(
+ id,
+ &kDeviceUIDAddress,
+ 0,
+ 0,
+ &size,
+ &device_UID);
+
+ return (result == noErr) ? device_UID : NULL;
+}
+
+void AggregateDeviceManager::GetDeviceName(
+ AudioDeviceID id, char* name, UInt32 size) {
+ static const AudioObjectPropertyAddress kDeviceNameAddress = {
+ kAudioDevicePropertyDeviceName,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ OSStatus result = AudioObjectGetPropertyData(
+ id,
+ &kDeviceNameAddress,
+ 0,
+ 0,
+ &size,
+ name);
+
+ if (result != noErr && size > 0)
+ name[0] = 0;
+}
+
+UInt32 AggregateDeviceManager::GetClockDomain(AudioDeviceID device_id) {
+ static const AudioObjectPropertyAddress kClockDomainAddress = {
+ kAudioDevicePropertyClockDomain,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 clockdomain = 0;
+ UInt32 size = sizeof(UInt32);
+ OSStatus result = AudioObjectGetPropertyData(
+ device_id,
+ &kClockDomainAddress,
+ 0,
+ 0,
+ &size,
+ &clockdomain);
+
+ return (result == noErr) ? clockdomain : 0;
+}
+
+OSStatus AggregateDeviceManager::GetPluginID(AudioObjectID* id) {
+ DCHECK(id);
+
+ // Get the audio hardware plugin.
+ CFStringRef bundle_name = CFSTR("com.apple.audio.CoreAudio");
+
+ AudioValueTranslation plugin_translation;
+ plugin_translation.mInputData = &bundle_name;
+ plugin_translation.mInputDataSize = sizeof(bundle_name);
+ plugin_translation.mOutputData = id;
+ plugin_translation.mOutputDataSize = sizeof(*id);
+
+ static const AudioObjectPropertyAddress kPlugInAddress = {
+ kAudioHardwarePropertyPlugInForBundleID,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 size = sizeof(plugin_translation);
+ OSStatus result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &kPlugInAddress,
+ 0,
+ 0,
+ &size,
+ &plugin_translation);
+
+ DVLOG(1) << "CoreAudio plugin ID: " << *id;
+
+ return result;
+}
+
+CFMutableDictionaryRef
+AggregateDeviceManager::CreateAggregateDeviceDictionary(
+ AudioDeviceID input_id,
+ AudioDeviceID output_id) {
+ CFMutableDictionaryRef aggregate_device_dict = CFDictionaryCreateMutable(
+ NULL,
+ 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+ if (!aggregate_device_dict)
+ return NULL;
+
+ const CFStringRef kAggregateDeviceName =
+ CFSTR("ChromeAggregateAudioDevice");
+ const CFStringRef kAggregateDeviceUID =
+ CFSTR("com.google.chrome.AggregateAudioDevice");
+
+ // Add name and UID of the device to the dictionary.
+ CFDictionaryAddValue(
+ aggregate_device_dict,
+ CFSTR(kAudioAggregateDeviceNameKey),
+ kAggregateDeviceName);
+ CFDictionaryAddValue(
+ aggregate_device_dict,
+ CFSTR(kAudioAggregateDeviceUIDKey),
+ kAggregateDeviceUID);
+
+ // Add a "private aggregate key" to the dictionary.
+ // The 1 value means that the created aggregate device will
+ // only be accessible from the process that created it, and
+ // won't be visible to outside processes.
+ int value = 1;
+ ScopedCFTypeRef<CFNumberRef> aggregate_device_number(CFNumberCreate(
+ NULL,
+ kCFNumberIntType,
+ &value));
+ CFDictionaryAddValue(
+ aggregate_device_dict,
+ CFSTR(kAudioAggregateDeviceIsPrivateKey),
+ aggregate_device_number);
+
+ return aggregate_device_dict;
+}
+
+CFMutableArrayRef
+AggregateDeviceManager::CreateSubDeviceArray(
+ CFStringRef input_device_UID, CFStringRef output_device_UID) {
+ CFMutableArrayRef sub_devices_array = CFArrayCreateMutable(
+ NULL,
+ 0,
+ &kCFTypeArrayCallBacks);
+
+ CFArrayAppendValue(sub_devices_array, input_device_UID);
+ CFArrayAppendValue(sub_devices_array, output_device_UID);
+
+ return sub_devices_array;
+}
+
+OSStatus AggregateDeviceManager::CreateAggregateDevice(
+ AudioDeviceID input_id,
+ AudioDeviceID output_id,
+ AudioDeviceID* aggregate_device) {
+ DCHECK(aggregate_device);
+
+ const size_t kMaxDeviceNameLength = 256;
+
+ scoped_ptr<char[]> input_device_name(new char[kMaxDeviceNameLength]);
+ GetDeviceName(
+ input_id,
+ input_device_name.get(),
+ kMaxDeviceNameLength);  // Not sizeof(input_device_name): that would be
+                         // the size of the scoped_ptr, not the buffer.
+ DVLOG(1) << "Input device: \n" << input_device_name.get();
+
+ scoped_ptr<char[]> output_device_name(new char[kMaxDeviceNameLength]);
+ GetDeviceName(
+ output_id,
+ output_device_name.get(),
+ kMaxDeviceNameLength);  // Buffer length, not sizeof the scoped_ptr.
+ DVLOG(1) << "Output device: \n" << output_device_name.get();
+
+ OSStatus result = GetPluginID(&plugin_id_);
+ if (result != noErr)
+ return result;
+
+ // Create a dictionary for the aggregate device.
+ ScopedCFTypeRef<CFMutableDictionaryRef> aggregate_device_dict(
+ CreateAggregateDeviceDictionary(input_id, output_id));
+ if (!aggregate_device_dict)
+ return -1;
+
+ // Create the aggregate device.
+ static const AudioObjectPropertyAddress kCreateAggregateDeviceAddress = {
+ kAudioPlugInCreateAggregateDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 size = sizeof(*aggregate_device);
+ result = AudioObjectGetPropertyData(
+ plugin_id_,
+ &kCreateAggregateDeviceAddress,
+ sizeof(aggregate_device_dict),
+ &aggregate_device_dict,
+ &size,
+ aggregate_device);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error creating aggregate audio device!";
+ return result;
+ }
+
+ // Set the sub-devices for the aggregate device.
+ // In this case we use two: the input and output devices.
+
+ ScopedCFTypeRef<CFStringRef> input_device_UID(GetDeviceUID(input_id));
+ ScopedCFTypeRef<CFStringRef> output_device_UID(GetDeviceUID(output_id));
+ if (!input_device_UID || !output_device_UID) {
+ DLOG(ERROR) << "Error getting audio device UID strings.";
+ return -1;
+ }
+
+ ScopedCFTypeRef<CFMutableArrayRef> sub_devices_array(
+ CreateSubDeviceArray(input_device_UID, output_device_UID));
+ if (sub_devices_array == NULL) {
+ DLOG(ERROR) << "Error creating sub-devices array.";
+ return -1;
+ }
+
+ static const AudioObjectPropertyAddress kSetSubDevicesAddress = {
+ kAudioAggregateDevicePropertyFullSubDeviceList,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ size = sizeof(CFMutableArrayRef);
+ result = AudioObjectSetPropertyData(
+ *aggregate_device,
+ &kSetSubDevicesAddress,
+ 0,
+ NULL,
+ size,
+ &sub_devices_array);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error setting aggregate audio device sub-devices!";
+ return result;
+ }
+
+ // Use the input device as the master device.
+ static const AudioObjectPropertyAddress kSetMasterDeviceAddress = {
+ kAudioAggregateDevicePropertyMasterSubDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ size = sizeof(CFStringRef);
+ result = AudioObjectSetPropertyData(
+ *aggregate_device,
+ &kSetMasterDeviceAddress,
+ 0,
+ NULL,
+ size,
+ &input_device_UID);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error setting aggregate audio device master device!";
+ return result;
+ }
+
+ DVLOG(1) << "New aggregate device: " << *aggregate_device;
+ return noErr;
+}
+
+void AggregateDeviceManager::DestroyAggregateDevice() {
+ if (aggregate_device_ == kAudioObjectUnknown)
+ return;
+
+ static const AudioObjectPropertyAddress kDestroyAddress = {
+ kAudioPlugInDestroyAggregateDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 size = sizeof(aggregate_device_);
+ OSStatus result = AudioObjectGetPropertyData(
+ plugin_id_,
+ &kDestroyAddress,
+ 0,
+ NULL,
+ &size,
+ &aggregate_device_);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error destroying aggregate audio device!";
+ return;
+ }
+
+ aggregate_device_ = kAudioObjectUnknown;
+}
+
+} // namespace media
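
[Annotation] Worth noting from the file above: CoreAudio creates and destroys aggregate devices by *getting* a property on the HAL plug-in object (kAudioPlugInCreateAggregateDevice / kAudioPlugInDestroyAggregateDevice), with the qualifier carrying the creation dictionary. A stripped-down sketch of that call shape, independent of this class (error handling elided; the description dictionary is assumed to be built as in CreateAggregateDeviceDictionary() above):

    #include <CoreAudio/CoreAudio.h>

    // Sketch only: create, then destroy, a private aggregate device using
    // the same get-property-as-RPC pattern as AggregateDeviceManager.
    static OSStatus CreateAndDestroyAggregate(AudioObjectID plugin_id,
                                              CFDictionaryRef description) {
      const AudioObjectPropertyAddress create_addr = {
          kAudioPlugInCreateAggregateDevice,
          kAudioObjectPropertyScopeGlobal,
          kAudioObjectPropertyElementMaster};
      AudioDeviceID device = kAudioObjectUnknown;
      UInt32 size = sizeof(device);
      OSStatus result = AudioObjectGetPropertyData(
          plugin_id, &create_addr, sizeof(description), &description,
          &size, &device);
      if (result != noErr)
        return result;

      const AudioObjectPropertyAddress destroy_addr = {
          kAudioPlugInDestroyAggregateDevice,
          kAudioObjectPropertyScopeGlobal,
          kAudioObjectPropertyElementMaster};
      size = sizeof(device);
      return AudioObjectGetPropertyData(
          plugin_id, &destroy_addr, 0, NULL, &size, &device);
    }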
diff --git a/media/audio/mac/aggregate_device_manager.h b/media/audio/mac/aggregate_device_manager.h
new file mode 100644
index 0000000..7b8b71f
--- /dev/null
+++ b/media/audio/mac/aggregate_device_manager.h
@@ -0,0 +1,58 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
+#define MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
+
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MEDIA_EXPORT AggregateDeviceManager {
+ public:
+ AggregateDeviceManager();
+ ~AggregateDeviceManager();
+
+ // Lazily creates an aggregate device based on the default
+ // input and output devices.
+ // Returns a valid device, or kAudioDeviceUnknown if the default
+ // devices are not suitable for aggregation.
+ AudioDeviceID GetDefaultAggregateDevice();
+
+ private:
+ // The caller is responsible for releasing the CFStringRef.
+ static CFStringRef GetDeviceUID(AudioDeviceID id);
+
+ static void GetDeviceName(AudioDeviceID id, char* name, UInt32 size);
+ static UInt32 GetClockDomain(AudioDeviceID device_id);
+ static OSStatus GetPluginID(AudioObjectID* id);
+
+ CFMutableDictionaryRef CreateAggregateDeviceDictionary(
+ AudioDeviceID input_id,
+ AudioDeviceID output_id);
+
+ CFMutableArrayRef CreateSubDeviceArray(CFStringRef input_device_UID,
+ CFStringRef output_device_UID);
+
+ OSStatus CreateAggregateDevice(AudioDeviceID input_id,
+ AudioDeviceID output_id,
+ AudioDeviceID* aggregate_device);
+ void DestroyAggregateDevice();
+
+ AudioObjectID plugin_id_;
+ AudioDeviceID input_device_;
+ AudioDeviceID output_device_;
+
+ AudioDeviceID aggregate_device_;
+
+ DISALLOW_COPY_AND_ASSIGN(AggregateDeviceManager);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
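
[Annotation] A hedged usage sketch for the class declared above. In this patch the only real caller is AudioManagerMac::MakeLowLatencyOutputStream (further down); the function here is hypothetical:

    #include "media/audio/mac/aggregate_device_manager.h"

    // GetDefaultAggregateDevice() lazily builds a private aggregate device
    // from the default input/output devices and caches it;
    // kAudioDeviceUnknown means "not suitable, use a fallback path".
    void Example(media::AggregateDeviceManager* manager) {
      AudioDeviceID device = manager->GetDefaultAggregateDevice();
      if (device != kAudioDeviceUnknown) {
        // Drive input and output through this single device (e.g. AUHAL).
      } else {
        // Fall back to a synchronized two-device path.
      }
    }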
diff --git a/media/audio/mac/audio_auhal_mac_unittest.cc b/media/audio/mac/audio_auhal_mac_unittest.cc
index fd0ffff..9babd80 100644
--- a/media/audio/mac/audio_auhal_mac_unittest.cc
+++ b/media/audio/mac/audio_auhal_mac_unittest.cc
@@ -45,7 +45,7 @@ class AUHALStreamTest : public testing::Test {
AudioOutputStream* Create() {
return manager_->MakeAudioOutputStream(
- manager_->GetDefaultOutputStreamParameters(), "");
+ manager_->GetDefaultOutputStreamParameters(), "", "");
}
bool CanRunAudioTests() {
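
[Annotation] The extra empty-string argument is the new input_device_id (an empty string selects the default device), mirroring the MakeLowLatencyOutputStream change below. Spelled out as a sketch:

    // Sketch of the widened call; both IDs empty = default devices.
    AudioOutputStream* stream = manager_->MakeAudioOutputStream(
        manager_->GetDefaultOutputStreamParameters(),
        std::string(),   // device_id (output)
        std::string());  // input_device_id (restored by this revert)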
diff --git a/media/audio/mac/audio_manager_mac.cc b/media/audio/mac/audio_manager_mac.cc
index e86e7b48..c08efff 100644
--- a/media/audio/mac/audio_manager_mac.cc
+++ b/media/audio/mac/audio_manager_mac.cc
@@ -20,6 +20,8 @@
#include "media/audio/mac/audio_input_mac.h"
#include "media/audio/mac/audio_low_latency_input_mac.h"
#include "media/audio/mac/audio_low_latency_output_mac.h"
+#include "media/audio/mac/audio_synchronized_mac.h"
+#include "media/audio/mac/audio_unified_mac.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/channel_layout.h"
#include "media/base/limits.h"
@@ -54,6 +56,16 @@ static bool HasAudioHardware(AudioObjectPropertySelector selector) {
output_device_id != kAudioObjectUnknown;
}
+// Returns true if the default input device is the same as
+// the default output device.
+bool AudioManagerMac::HasUnifiedDefaultIO() {
+ AudioDeviceID input_id, output_id;
+ if (!GetDefaultInputDevice(&input_id) || !GetDefaultOutputDevice(&output_id))
+ return false;
+
+ return input_id == output_id;
+}
+
// Retrieves information on audio devices, and prepends the default
// device to the list if the list is non-empty.
static void GetAudioDeviceInfo(bool is_input,
@@ -554,18 +566,72 @@ std::string AudioManagerMac::GetAssociatedOutputDeviceID(
AudioOutputStream* AudioManagerMac::MakeLinearOutputStream(
const AudioParameters& params) {
- return MakeLowLatencyOutputStream(params, std::string());
+ return MakeLowLatencyOutputStream(params, std::string(), std::string());
}
AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) {
- AudioDeviceID device = GetAudioDeviceIdByUId(false, device_id);
- if (device == kAudioObjectUnknown) {
- DLOG(ERROR) << "Failed to open output device: " << device_id;
+ const std::string& device_id,
+ const std::string& input_device_id) {
+ // Handle basic output with no input channels.
+ if (params.input_channels() == 0) {
+ AudioDeviceID device = GetAudioDeviceIdByUId(false, device_id);
+ if (device == kAudioObjectUnknown) {
+ DLOG(ERROR) << "Failed to open output device: " << device_id;
+ return NULL;
+ }
+ return new AUHALStream(this, params, device);
+ }
+
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
+
+ // TODO(xians): support more than stereo input.
+ if (params.input_channels() != 2) {
+ // WebAudio is currently hard-coded to 2 channels so we should not
+ // see this case.
+ NOTREACHED() << "Only stereo input is currently supported!";
return NULL;
}
- return new AUHALStream(this, params, device);
+
+ AudioDeviceID device = kAudioObjectUnknown;
+ if (HasUnifiedDefaultIO()) {
+ // For I/O, the simplest case is when the default input and output
+ // devices are the same.
+ GetDefaultOutputDevice(&device);
+ VLOG(0) << "UNIFIED: default input and output devices are identical";
+ } else {
+ // Some audio hardware is presented as separate input and output devices
+ // even though they are really the same physical hardware and
+ // share the same "clock domain" at the lowest levels of the driver.
+ // A common example of this is the "built-in" audio hardware:
+ // "Built-in Line Input"
+ // "Built-in Output"
+ // We would like to use an "aggregate" device for these situations, since
+ // CoreAudio will make the most efficient use of the shared "clock domain"
+ // so we get the lowest latency and use fewer threads.
+ device = aggregate_device_manager_.GetDefaultAggregateDevice();
+ if (device != kAudioObjectUnknown)
+ VLOG(0) << "Using AGGREGATE audio device";
+ }
+
+ if (device != kAudioObjectUnknown &&
+ input_device_id == AudioManagerBase::kDefaultDeviceId)
+ return new AUHALStream(this, params, device);
+
+ // Fall back to AudioSynchronizedStream, which handles arbitrary
+ // combinations of input and output devices, even ones running at
+ // different sample-rates.
+ // kAudioDeviceUnknown translates to "use default" here.
+ // TODO(xians): consider tracking UMA stats on AUHALStream
+ // versus AudioSynchronizedStream.
+ AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, input_device_id);
+ if (audio_device_id == kAudioObjectUnknown)
+ return NULL;
+
+ return new AudioSynchronizedStream(this,
+ params,
+ audio_device_id,
+ kAudioDeviceUnknown);
}
std::string AudioManagerMac::GetDefaultOutputDeviceID() {
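
[Annotation] The selection logic above is a short ladder. A condensed, purely illustrative sketch (the names here are not part of the patch):

    // Decision ladder from MakeLowLatencyOutputStream, reduced to an enum.
    enum class OutputPath { kAuhalOutputOnly, kAuhalSharedDevice, kSynchronized };

    OutputPath ChoosePath(int input_channels,
                          bool unified_default_io,
                          bool aggregate_available,
                          bool input_is_default_device) {
      if (input_channels == 0)
        return OutputPath::kAuhalOutputOnly;    // plain output stream
      const bool shared = unified_default_io || aggregate_available;
      if (shared && input_is_default_device)
        return OutputPath::kAuhalSharedDevice;  // one clock, one AUHAL
      return OutputPath::kSynchronized;         // varispeed + FIFO fallback
    }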
diff --git a/media/audio/mac/audio_manager_mac.h b/media/audio/mac/audio_manager_mac.h
index 0d4e05f..641f9d3 100644
--- a/media/audio/mac/audio_manager_mac.h
+++ b/media/audio/mac/audio_manager_mac.h
@@ -11,6 +11,7 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/mac/aggregate_device_manager.h"
#include "media/audio/mac/audio_device_listener_mac.h"
namespace media {
@@ -39,7 +40,8 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
@@ -78,6 +80,8 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
const AudioParameters& input_params) OVERRIDE;
private:
+ bool HasUnifiedDefaultIO();
+
// Helper methods for constructing AudioDeviceListenerMac on the audio thread.
void CreateDeviceListener();
void DestroyDeviceListener();
@@ -95,6 +99,8 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
int current_sample_rate_;
AudioDeviceID current_output_device_;
+ AggregateDeviceManager aggregate_device_manager_;
+
// Helper class which monitors power events to determine if output streams
// should defer Start() calls. Required to workaround an OSX bug. See
// http://crbug.com/160920 for more details.
diff --git a/media/audio/mac/audio_synchronized_mac.cc b/media/audio/mac/audio_synchronized_mac.cc
new file mode 100644
index 0000000..a9bc88e
--- /dev/null
+++ b/media/audio/mac/audio_synchronized_mac.cc
@@ -0,0 +1,976 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_synchronized_mac.h"
+
+#include <CoreServices/CoreServices.h>
+#include <algorithm>
+
+#include "base/basictypes.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/mac/audio_manager_mac.h"
+#include "media/base/channel_mixer.h"
+
+namespace media {
+
+static const int kHardwareBufferSize = 128;
+static const int kFifoSize = 16384;
+
+// TODO(crogers): handle the non-stereo case.
+static const int kChannels = 2;
+
+// This value was determined empirically for minimum latency while still
+// guarding against FIFO under-runs.
+static const int kBaseTargetFifoFrames = 256 + 64;
+
+// If the input and output sample-rate don't match, then we need to maintain
+// an additional safety margin due to the callback timing jitter and the
+// varispeed buffering. This value was empirically tuned.
+static const int kAdditionalTargetFifoFrames = 128;
+
+static void ZeroBufferList(AudioBufferList* buffer_list) {
+ for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i)
+ memset(buffer_list->mBuffers[i].mData,
+ 0,
+ buffer_list->mBuffers[i].mDataByteSize);
+}
+
+static void WrapBufferList(AudioBufferList* buffer_list,
+ AudioBus* bus,
+ int frames) {
+ DCHECK(buffer_list);
+ DCHECK(bus);
+ int channels = bus->channels();
+ int buffer_list_channels = buffer_list->mNumberBuffers;
+
+ // Copy pointers from AudioBufferList.
+ int source_idx = 0;
+ for (int i = 0; i < channels; ++i) {
+ bus->SetChannelData(
+ i, static_cast<float*>(buffer_list->mBuffers[source_idx].mData));
+
+ // It's ok to pass in a |buffer_list| with fewer channels, in which
+ // case we just duplicate the last channel.
+ if (source_idx < buffer_list_channels - 1)
+ ++source_idx;
+ }
+
+ // Finally set the actual length.
+ bus->set_frames(frames);
+}
+
+AudioSynchronizedStream::AudioSynchronizedStream(
+ AudioManagerMac* manager,
+ const AudioParameters& params,
+ AudioDeviceID input_id,
+ AudioDeviceID output_id)
+ : manager_(manager),
+ params_(params),
+ input_sample_rate_(0),
+ output_sample_rate_(0),
+ input_id_(input_id),
+ output_id_(output_id),
+ input_buffer_list_(NULL),
+ fifo_(kChannels, kFifoSize),
+ target_fifo_frames_(kBaseTargetFifoFrames),
+ average_delta_(0.0),
+ fifo_rate_compensation_(1.0),
+ input_unit_(0),
+ varispeed_unit_(0),
+ output_unit_(0),
+ first_input_time_(-1),
+ is_running_(false),
+ hardware_buffer_size_(kHardwareBufferSize),
+ channels_(kChannels) {
+ VLOG(1) << "AudioSynchronizedStream::AudioSynchronizedStream()";
+}
+
+AudioSynchronizedStream::~AudioSynchronizedStream() {
+ DCHECK(!input_unit_);
+ DCHECK(!output_unit_);
+ DCHECK(!varispeed_unit_);
+}
+
+bool AudioSynchronizedStream::Open() {
+ if (params_.channels() != kChannels) {
+ LOG(ERROR) << "Only stereo output is currently supported.";
+ return false;
+ }
+
+ // Create the input, output, and varispeed AudioUnits.
+ OSStatus result = CreateAudioUnits();
+ if (result != noErr) {
+ LOG(ERROR) << "Cannot create AudioUnits.";
+ return false;
+ }
+
+ result = SetupInput(input_id_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error configuring input AudioUnit.";
+ return false;
+ }
+
+ result = SetupOutput(output_id_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error configuring output AudioUnit.";
+ return false;
+ }
+
+ result = SetupCallbacks();
+ if (result != noErr) {
+ LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
+ return false;
+ }
+
+ result = SetupStreamFormats();
+ if (result != noErr) {
+ LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
+ return false;
+ }
+
+ AllocateInputData();
+
+ // Final initialization of the AudioUnits.
+ result = AudioUnitInitialize(input_unit_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error initializing input AudioUnit.";
+ return false;
+ }
+
+ result = AudioUnitInitialize(output_unit_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error initializing output AudioUnit.";
+ return false;
+ }
+
+ result = AudioUnitInitialize(varispeed_unit_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error initializing varispeed AudioUnit.";
+ return false;
+ }
+
+ if (input_sample_rate_ != output_sample_rate_) {
+ // Add extra safety margin.
+ target_fifo_frames_ += kAdditionalTargetFifoFrames;
+ }
+
+ // Buffer initial silence corresponding to target I/O buffering.
+ fifo_.Clear();
+ scoped_ptr<AudioBus> silence =
+ AudioBus::Create(channels_, target_fifo_frames_);
+ silence->Zero();
+ fifo_.Push(silence.get());
+
+ return true;
+}
+
+void AudioSynchronizedStream::Close() {
+ DCHECK(!is_running_);
+
+ if (input_buffer_list_) {
+ free(input_buffer_list_);
+ input_buffer_list_ = 0;
+ input_bus_.reset(NULL);
+ wrapper_bus_.reset(NULL);
+ }
+
+ if (input_unit_) {
+ AudioUnitUninitialize(input_unit_);
+ CloseComponent(input_unit_);
+ }
+
+ if (output_unit_) {
+ AudioUnitUninitialize(output_unit_);
+ CloseComponent(output_unit_);
+ }
+
+ if (varispeed_unit_) {
+ AudioUnitUninitialize(varispeed_unit_);
+ CloseComponent(varispeed_unit_);
+ }
+
+ input_unit_ = NULL;
+ output_unit_ = NULL;
+ varispeed_unit_ = NULL;
+
+ // Inform the audio manager that we have been closed. This can cause our
+ // destruction.
+ manager_->ReleaseOutputStream(this);
+}
+
+void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
+ DCHECK(callback);
+ DCHECK(input_unit_);
+ DCHECK(output_unit_);
+ DCHECK(varispeed_unit_);
+
+ if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
+ return;
+
+ source_ = callback;
+
+ // Reset state variables each time we Start().
+ fifo_rate_compensation_ = 1.0;
+ average_delta_ = 0.0;
+
+ OSStatus result = noErr;
+
+ if (!is_running_) {
+ first_input_time_ = -1;
+
+ result = AudioOutputUnitStart(input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr) {
+ result = AudioOutputUnitStart(output_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ }
+ }
+
+ is_running_ = true;
+}
+
+void AudioSynchronizedStream::Stop() {
+ OSStatus result = noErr;
+ if (is_running_) {
+ result = AudioOutputUnitStop(input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr) {
+ result = AudioOutputUnitStop(output_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ }
+ }
+
+ if (result == noErr)
+ is_running_ = false;
+}
+
+bool AudioSynchronizedStream::IsRunning() {
+ return is_running_;
+}
+
+// TODO(crogers): implement - or remove from AudioOutputStream.
+void AudioSynchronizedStream::SetVolume(double volume) {}
+void AudioSynchronizedStream::GetVolume(double* volume) {}
+
+OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
+ AudioDeviceID output_id) {
+ OSStatus result = noErr;
+
+ // Get the default output device if device is unknown.
+ if (output_id == kAudioDeviceUnknown) {
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ pa.mScope = kAudioObjectPropertyScopeGlobal;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ UInt32 size = sizeof(output_id);
+
+ result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &output_id);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+ }
+
+ // Set the render frame size.
+ UInt32 frame_size = hardware_buffer_size_;
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioDevicePropertyScopeInput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectSetPropertyData(
+ output_id,
+ &pa,
+ 0,
+ 0,
+ sizeof(frame_size),
+ &frame_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ output_info_.Initialize(output_id, false);
+
+ // Set the Current Device to the Default Output Unit.
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &output_info_.id_,
+ sizeof(output_info_.id_));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
+ AudioDeviceID input_id) {
+ OSStatus result = noErr;
+
+ // Get the default input device if device is unknown.
+ if (input_id == kAudioDeviceUnknown) {
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ pa.mScope = kAudioObjectPropertyScopeGlobal;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ UInt32 size = sizeof(input_id);
+
+ result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &input_id);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+ }
+
+ // Set the render frame size.
+ UInt32 frame_size = hardware_buffer_size_;
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioDevicePropertyScopeInput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectSetPropertyData(
+ input_id,
+ &pa,
+ 0,
+ 0,
+ sizeof(frame_size),
+ &frame_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ input_info_.Initialize(input_id, true);
+
+ // Set the Current Device to the AUHAL.
+ // This should be done only after I/O has been enabled on the AUHAL.
+ result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &input_info_.id_,
+ sizeof(input_info_.id_));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::CreateAudioUnits() {
+ // Q: Why do we need a varispeed unit?
+ // A: If the input device and the output device are running at
+ // different sample rates and/or on different clocks, we will need
+ // to compensate to avoid a pitch change and
+ // to avoid buffer under and over runs.
+ ComponentDescription varispeed_desc;
+ varispeed_desc.componentType = kAudioUnitType_FormatConverter;
+ varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
+ varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ varispeed_desc.componentFlags = 0;
+ varispeed_desc.componentFlagsMask = 0;
+
+ Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
+ if (varispeed_comp == NULL)
+ return -1;
+
+ OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Open input AudioUnit.
+ ComponentDescription input_desc;
+ input_desc.componentType = kAudioUnitType_Output;
+ input_desc.componentSubType = kAudioUnitSubType_HALOutput;
+ input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ input_desc.componentFlags = 0;
+ input_desc.componentFlagsMask = 0;
+
+ Component input_comp = FindNextComponent(NULL, &input_desc);
+ if (input_comp == NULL)
+ return -1;
+
+ result = OpenAComponent(input_comp, &input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Open output AudioUnit.
+ ComponentDescription output_desc;
+ output_desc.componentType = kAudioUnitType_Output;
+ output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
+ output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ output_desc.componentFlags = 0;
+ output_desc.componentFlagsMask = 0;
+
+ Component output_comp = FindNextComponent(NULL, &output_desc);
+ if (output_comp == NULL)
+ return -1;
+
+ result = OpenAComponent(output_comp, &output_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ return noErr;
+}
+
+OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
+ // The AUHAL used for input needs to be initialized
+ // before anything is done to it.
+ OSStatus result = AudioUnitInitialize(input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // We must enable the Audio Unit (AUHAL) for input and disable output
+ // BEFORE setting the AUHAL's current device.
+ result = EnableIO();
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ result = SetInputDeviceAsCurrent(input_id);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::EnableIO() {
+ // Enable input on the AUHAL.
+ UInt32 enable_io = 1;
+ OSStatus result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input,
+ 1, // input element
+ &enable_io,
+ sizeof(enable_io));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Disable Output on the AUHAL.
+ enable_io = 0;
+ result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ 0, // output element
+ &enable_io,
+ sizeof(enable_io));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
+ OSStatus result = noErr;
+
+ result = SetOutputDeviceAsCurrent(output_id);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Tell the output unit not to reset timestamps.
+ // Otherwise sample rate changes will cause sync loss.
+ UInt32 start_at_zero = 0;
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioOutputUnitProperty_StartTimestampsAtZero,
+ kAudioUnitScope_Global,
+ 0,
+ &start_at_zero,
+ sizeof(start_at_zero));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetupCallbacks() {
+ // Set the input callback.
+ AURenderCallbackStruct callback;
+ callback.inputProc = InputProc;
+ callback.inputProcRefCon = this;
+ OSStatus result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Global,
+ 0,
+ &callback,
+ sizeof(callback));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the output callback.
+ callback.inputProc = OutputProc;
+ callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input,
+ 0,
+ &callback,
+ sizeof(callback));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the varispeed callback.
+ callback.inputProc = VarispeedProc;
+ callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(
+ varispeed_unit_,
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input,
+ 0,
+ &callback,
+ sizeof(callback));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetupStreamFormats() {
+ AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;
+
+ // Get the Stream Format (input device side).
+ UInt32 property_size = sizeof(asbd_dev1_in);
+ OSStatus result = AudioUnitGetProperty(
+ input_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ 1,
+ &asbd_dev1_in,
+ &property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Get the Stream Format (client side).
+ property_size = sizeof(asbd);
+ result = AudioUnitGetProperty(
+ input_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 1,
+ &asbd,
+ &property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Get the Stream Format (output device side).
+ property_size = sizeof(asbd_dev2_out);
+ result = AudioUnitGetProperty(
+ output_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 0,
+ &asbd_dev2_out,
+ &property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the format of all the AUs to the input/output devices' channel count.
+ // For the simple case, use the lower of the input device's and the
+ // output device's channel counts.
+ asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
+ asbd_dev2_out.mChannelsPerFrame);
+
+ // We must get the sample rate of the input device and set it to the
+ // stream format of AUHAL.
+ Float64 rate = 0;
+ property_size = sizeof(rate);
+
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectGetPropertyData(
+ input_info_.id_,
+ &pa,
+ 0,
+ 0,
+ &property_size,
+ &rate);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ input_sample_rate_ = rate;
+
+ asbd.mSampleRate = rate;
+ property_size = sizeof(asbd);
+
+ // Set the new formats to the AUs...
+ result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 1,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ result = AudioUnitSetProperty(
+ varispeed_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ 0,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the correct sample rate for the output device,
+ // but keep the channel count the same.
+ property_size = sizeof(rate);
+
+ pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectGetPropertyData(
+ output_info_.id_,
+ &pa,
+ 0,
+ 0,
+ &property_size,
+ &rate);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ output_sample_rate_ = rate;
+
+ // The requested sample-rate must match the hardware sample-rate.
+ if (output_sample_rate_ != params_.sample_rate()) {
+ LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
+ << " must match the hardware sample-rate: " << output_sample_rate_;
+ return kAudioDeviceUnsupportedFormatError;
+ }
+
+ asbd.mSampleRate = rate;
+ property_size = sizeof(asbd);
+
+ // Set the new audio stream formats for the rest of the AUs...
+ result = AudioUnitSetProperty(
+ varispeed_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 0,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ 0,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+void AudioSynchronizedStream::AllocateInputData() {
+ // Get the native number of input channels that the hardware supports.
+ int hardware_channels = 0;
+ bool got_hardware_channels = AudioManagerMac::GetDeviceChannels(
+ input_id_, kAudioDevicePropertyScopeInput, &hardware_channels);
+ if (!got_hardware_channels || hardware_channels > 2) {
+ // Only mono and stereo are supported on the input side. If we fail to
+ // get the native channel count, or it is greater than two, we open the
+ // device in stereo mode.
+ hardware_channels = 2;
+ }
+
+ // Allocate storage for the AudioBufferList used for the
+ // input data from the input AudioUnit.
+ // We allocate enough space for one AudioBuffer per channel.
+ size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
+ (sizeof(AudioBuffer) * hardware_channels);
+
+ input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
+ input_buffer_list_->mNumberBuffers = hardware_channels;
+
+ input_bus_ = AudioBus::Create(hardware_channels, hardware_buffer_size_);
+ wrapper_bus_ = AudioBus::CreateWrapper(channels_);
+ if (hardware_channels != params_.input_channels()) {
+ ChannelLayout hardware_channel_layout =
+ GuessChannelLayout(hardware_channels);
+ ChannelLayout requested_channel_layout =
+ GuessChannelLayout(params_.input_channels());
+ channel_mixer_.reset(new ChannelMixer(hardware_channel_layout,
+ requested_channel_layout));
+ mixer_bus_ = AudioBus::Create(params_.input_channels(),
+ hardware_buffer_size_);
+ }
+
+ // Allocate buffers for AudioBufferList.
+ UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
+ for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
+ input_buffer_list_->mBuffers[i].mNumberChannels = 1;
+ input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
+ input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
+ }
+}
+
+OSStatus AudioSynchronizedStream::HandleInputCallback(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");
+
+ if (first_input_time_ < 0.0)
+ first_input_time_ = time_stamp->mSampleTime;
+
+ // Get the new audio input data.
+ OSStatus result = AudioUnitRender(
+ input_unit_,
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ input_buffer_list_);
+
+ // TODO(xians): Add back the DCHECK after synchronized IO supports all
+ // combinations of input and output params. See http://issue/246521.
+ if (result != noErr)
+ return result;
+
+ // Buffer input into FIFO.
+ int available_frames = fifo_.max_frames() - fifo_.frames();
+ if (input_bus_->frames() <= available_frames) {
+ if (channel_mixer_) {
+ channel_mixer_->Transform(input_bus_.get(), mixer_bus_.get());
+ fifo_.Push(mixer_bus_.get());
+ } else {
+ fifo_.Push(input_bus_.get());
+ }
+ }
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ // Create a wrapper bus on the AudioBufferList.
+ WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
+
+ if (fifo_.frames() < static_cast<int>(number_of_frames)) {
+ // We don't DCHECK here, since this is a possible run-time condition
+ // if the machine is bogged down.
+ wrapper_bus_->Zero();
+ return noErr;
+ }
+
+ // Read from the FIFO to feed the varispeed.
+ fifo_.Consume(wrapper_bus_.get(), 0, number_of_frames);
+
+ return noErr;
+}
+
+OSStatus AudioSynchronizedStream::HandleOutputCallback(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ // Input callback hasn't run yet or we've suddenly changed sample-rates
+ // -> silence.
+ if (first_input_time_ < 0.0 ||
+ static_cast<int>(number_of_frames) != params_.frames_per_buffer()) {
+ ZeroBufferList(io_data);
+ return noErr;
+ }
+
+ // Use the varispeed playback rate to offset small discrepancies
+ // in hardware clocks, and also any differences in sample-rate
+ // between input and output devices.
+
+ // Calculate a varispeed rate scalar factor to compensate for drift between
+ // input and output. We use the actual number of frames still in the FIFO
+ // compared with the ideal value of |target_fifo_frames_|.
+ int delta = fifo_.frames() - target_fifo_frames_;
+
+ // Average |delta| because it can jitter back/forth quite frequently
+ // by +/- the hardware buffer-size *if* the input and output callbacks are
+ // happening at almost exactly the same time. Also, if the input and output
+ // sample-rates are different then |delta| will jitter quite a bit due to
+ // the rate conversion happening in the varispeed, plus the jittering of
+ // the callbacks. The average value is what's important here.
+ average_delta_ += (delta - average_delta_) * 0.1;
+
+ // Compute a rate compensation which always attracts us back to the
+ // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
+ const double kCorrectionTimeSeconds = 0.1;
+ double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
+ fifo_rate_compensation_ =
+ (correction_time_frames + average_delta_) / correction_time_frames;
+
+ // Adjust for FIFO drift.
+ OSStatus result = AudioUnitSetParameter(
+ varispeed_unit_,
+ kVarispeedParam_PlaybackRate,
+ kAudioUnitScope_Global,
+ 0,
+ fifo_rate_compensation_,
+ 0);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Render to the output using the varispeed.
+ result = AudioUnitRender(
+ varispeed_unit_,
+ io_action_flags,
+ time_stamp,
+ 0,
+ number_of_frames,
+ io_data);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Create a wrapper bus on the AudioBufferList.
+ WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
+
+ // Process in-place!
+ source_->OnMoreIOData(wrapper_bus_.get(),
+ wrapper_bus_.get(),
+ AudioBuffersState(0, 0));
+
+ return noErr;
+}
+
+OSStatus AudioSynchronizedStream::InputProc(
+ void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ AudioSynchronizedStream* stream =
+ static_cast<AudioSynchronizedStream*>(user_data);
+ DCHECK(stream);
+
+ return stream->HandleInputCallback(
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ io_data);
+}
+
+OSStatus AudioSynchronizedStream::VarispeedProc(
+ void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ AudioSynchronizedStream* stream =
+ static_cast<AudioSynchronizedStream*>(user_data);
+ DCHECK(stream);
+
+ return stream->HandleVarispeedCallback(
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ io_data);
+}
+
+OSStatus AudioSynchronizedStream::OutputProc(
+ void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ AudioSynchronizedStream* stream =
+ static_cast<AudioSynchronizedStream*>(user_data);
+ DCHECK(stream);
+
+ return stream->HandleOutputCallback(
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ io_data);
+}
+
+void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
+ AudioDeviceID id, bool is_input) {
+ id_ = id;
+ is_input_ = is_input;
+ if (id_ == kAudioDeviceUnknown)
+ return;
+
+ UInt32 property_size = sizeof(buffer_size_frames_);
+
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ OSStatus result = AudioObjectGetPropertyData(
+ id_,
+ &pa,
+ 0,
+ 0,
+ &property_size,
+ &buffer_size_frames_);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+}
+
+} // namespace media
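
[Annotation] To make the drift compensation in HandleOutputCallback() concrete, here is the same arithmetic as a standalone calculation (the 0.1 s constant comes from the code above; the 48 kHz rate and the 48-frame surplus are assumptions):

    #include <cstdio>

    int main() {
      const double kCorrectionTimeSeconds = 0.1;  // as in HandleOutputCallback()
      const double output_sample_rate = 48000.0;  // assumed hardware rate
      const double correction_time_frames =
          kCorrectionTimeSeconds * output_sample_rate;  // 4800 frames

      // Suppose the smoothed FIFO level sits 48 frames above target.
      const double average_delta = 48.0;
      const double rate =
          (correction_time_frames + average_delta) / correction_time_frames;

      // Prints 1.0100: the varispeed plays 1% fast, draining the surplus
      // over roughly 0.1 seconds.
      std::printf("playback rate: %.4f\n", rate);
      return 0;
    }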
diff --git a/media/audio/mac/audio_synchronized_mac.h b/media/audio/mac/audio_synchronized_mac.h
new file mode 100644
index 0000000..a6db48e
--- /dev/null
+++ b/media/audio/mac/audio_synchronized_mac.h
@@ -0,0 +1,216 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
+
+#include <AudioToolbox/AudioToolbox.h>
+#include <AudioUnit/AudioUnit.h>
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_fifo.h"
+
+namespace media {
+
+class AudioManagerMac;
+class ChannelMixer;
+
+// AudioSynchronizedStream allows arbitrary combinations of input and output
+// devices running off different clocks and using different drivers, with
+// potentially differing sample-rates. It implements AudioOutputStream
+// and shuttles its synchronized I/O data using AudioSourceCallback.
+//
+// Callers must first query the native sample rate of the selected output
+// device and use that same rate when creating this object.
+//
+// ............................................................................
+// Theory of Operation:
+// .
+// INPUT THREAD . OUTPUT THREAD
+// +-----------------+ +------+ .
+// | Input AudioUnit | --> | | .
+// +-----------------+ | | .
+// | FIFO | .
+// | | +-----------+
+// | | -----> | Varispeed |
+// | | +-----------+
+// +------+ . |
+// . | +-----------+
+// . OnMoreIOData() --> | Output AU |
+// . +-----------+
+//
+// The input AudioUnit's InputProc is called on one thread which feeds the
+// FIFO. The output AudioUnit's OutputProc is called on a second thread
+// which pulls on the varispeed to get the current input data. The varispeed
+// handles mismatches between input and output sample-rate and also clock drift
+// between the input and output drivers. The varispeed consumes its data from
+// the FIFO and adjusts its rate dynamically according to the amount
+// of data buffered in the FIFO. If the FIFO starts getting too much data
+// buffered then the varispeed will speed up slightly to compensate
+// and similarly if the FIFO doesn't have enough data buffered then the
+// varispeed will slow down slightly.
+//
+// Finally, once the input data is available then OnMoreIOData() is called
+// which is given this input, and renders the output which is finally sent
+// to the Output AudioUnit.
+class AudioSynchronizedStream : public AudioOutputStream {
+ public:
+ // The ctor takes all the usual parameters, plus |manager|, which is the
+ // audio manager that is creating this object.
+ AudioSynchronizedStream(AudioManagerMac* manager,
+ const AudioParameters& params,
+ AudioDeviceID input_id,
+ AudioDeviceID output_id);
+
+ virtual ~AudioSynchronizedStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ OSStatus SetInputDeviceAsCurrent(AudioDeviceID input_id);
+ OSStatus SetOutputDeviceAsCurrent(AudioDeviceID output_id);
+ AudioDeviceID GetInputDeviceID() { return input_info_.id_; }
+ AudioDeviceID GetOutputDeviceID() { return output_info_.id_; }
+
+ bool IsRunning();
+
+ private:
+ // Initialization.
+ OSStatus CreateAudioUnits();
+ OSStatus SetupInput(AudioDeviceID input_id);
+ OSStatus EnableIO();
+ OSStatus SetupOutput(AudioDeviceID output_id);
+ OSStatus SetupCallbacks();
+ OSStatus SetupStreamFormats();
+ void AllocateInputData();
+
+ // Handlers for the AudioUnit callbacks.
+ OSStatus HandleInputCallback(AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ OSStatus HandleVarispeedCallback(AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ OSStatus HandleOutputCallback(AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ // AudioUnit callbacks.
+ static OSStatus InputProc(void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ static OSStatus VarispeedProc(void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ static OSStatus OutputProc(void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ // Our creator.
+ AudioManagerMac* manager_;
+
+ // Client parameters.
+ AudioParameters params_;
+
+ double input_sample_rate_;
+ double output_sample_rate_;
+
+ // Pointer to the object that will provide the audio samples.
+ AudioSourceCallback* source_;
+
+ // Values used in Open().
+ AudioDeviceID input_id_;
+ AudioDeviceID output_id_;
+
+ // The input AudioUnit renders its data here.
+ AudioBufferList* input_buffer_list_;
+
+ // Holds the actual data for |input_buffer_list_|.
+ scoped_ptr<AudioBus> input_bus_;
+
+ // Used to overlay AudioBufferLists.
+ scoped_ptr<AudioBus> wrapper_bus_;
+
+ class AudioDeviceInfo {
+ public:
+ AudioDeviceInfo()
+ : id_(kAudioDeviceUnknown),
+ is_input_(false),
+ buffer_size_frames_(0) {}
+ void Initialize(AudioDeviceID id, bool is_input);
+ bool IsInitialized() const { return id_ != kAudioDeviceUnknown; }
+
+ AudioDeviceID id_;
+ bool is_input_;
+ UInt32 buffer_size_frames_;
+ };
+
+ AudioDeviceInfo input_info_;
+ AudioDeviceInfo output_info_;
+
+ // Used for input to output buffering.
+ AudioFifo fifo_;
+
+ // The optimal number of frames we'd like to keep in the FIFO at all times.
+ int target_fifo_frames_;
+
+ // A running average of the measured delta between actual number of frames
+ // in the FIFO versus |target_fifo_frames_|.
+ double average_delta_;
+
+ // A varispeed rate scalar which is calculated based on FIFO drift.
+ double fifo_rate_compensation_;
+
+ // AudioUnits.
+ AudioUnit input_unit_;
+ AudioUnit varispeed_unit_;
+ AudioUnit output_unit_;
+
+ double first_input_time_;
+
+ bool is_running_;
+ int hardware_buffer_size_;
+ int channels_;
+
+ // Channel mixer used to transform mono to stereo data. It is only
+ // created if the input hardware channel layout is mono.
+ scoped_ptr<ChannelMixer> channel_mixer_;
+ scoped_ptr<AudioBus> mixer_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioSynchronizedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
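
[Annotation] The "Theory of Operation" above reduces to a producer/consumer handoff around media::AudioFifo. A minimal sketch of that handoff, detached from the AudioUnit plumbing (the callback framing is schematic; Push/Consume/frames/max_frames are the AudioFifo calls used in the .cc above):

    #include "media/base/audio_bus.h"
    #include "media/base/audio_fifo.h"

    // Input thread: push each captured block, dropping it if the FIFO
    // has no room (mirrors HandleInputCallback above).
    void OnInput(media::AudioFifo* fifo, const media::AudioBus* input) {
      if (input->frames() <= fifo->max_frames() - fifo->frames())
        fifo->Push(input);
    }

    // Output thread: consume, or emit silence on underrun (a run-time
    // condition on a bogged-down machine, not a programming error).
    void OnOutput(media::AudioFifo* fifo, media::AudioBus* output) {
      if (fifo->frames() < output->frames()) {
        output->Zero();
        return;
      }
      fifo->Consume(output, 0, output->frames());
    }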
diff --git a/media/audio/mac/audio_unified_mac.cc b/media/audio/mac/audio_unified_mac.cc
new file mode 100644
index 0000000..d1dc007
--- /dev/null
+++ b/media/audio/mac/audio_unified_mac.cc
@@ -0,0 +1,397 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_unified_mac.h"
+
+#include <CoreServices/CoreServices.h>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/mac/audio_manager_mac.h"
+
+namespace media {
+
+// TODO(crogers): support more than hard-coded stereo input.
+// Ideally we would like to receive this value as a constructor argument.
+static const int kDefaultInputChannels = 2;
+
+AudioHardwareUnifiedStream::AudioHardwareUnifiedStream(
+ AudioManagerMac* manager, const AudioParameters& params)
+ : manager_(manager),
+ source_(NULL),
+ client_input_channels_(kDefaultInputChannels),
+ volume_(1.0f),
+ input_channels_(0),
+ output_channels_(0),
+ input_channels_per_frame_(0),
+ output_channels_per_frame_(0),
+ io_proc_id_(0),
+ device_(kAudioObjectUnknown),
+ is_playing_(false) {
+ DCHECK(manager_);
+
+  // A frame is one sample across all channels. In interleaved audio the
+  // per-frame fields describe one sample for each of the |channels|. In
+  // uncompressed (LPCM) audio, a packet is always exactly one frame.
+ format_.mSampleRate = params.sample_rate();
+ format_.mFormatID = kAudioFormatLinearPCM;
+ format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
+ kLinearPCMFormatFlagIsSignedInteger;
+ format_.mBitsPerChannel = params.bits_per_sample();
+ format_.mChannelsPerFrame = params.channels();
+ format_.mFramesPerPacket = 1;
+ format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
+ format_.mBytesPerFrame = format_.mBytesPerPacket;
+ format_.mReserved = 0;
+
+ // Calculate the number of sample frames per callback.
+ number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
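+  // For example, 16-bit stereo gives mBytesPerPacket = (16 * 2) / 8 = 4, so
+  // a 2048-byte buffer yields 2048 / 4 = 512 sample frames per callback.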
+
+ input_bus_ = AudioBus::Create(client_input_channels_,
+ params.frames_per_buffer());
+ output_bus_ = AudioBus::Create(params);
+}
+
+AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() {
+ DCHECK_EQ(device_, kAudioObjectUnknown);
+}
+
+bool AudioHardwareUnifiedStream::Open() {
+ // Obtain the current output device selected by the user.
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ pa.mScope = kAudioObjectPropertyScopeGlobal;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+
+ UInt32 size = sizeof(device_);
+
+ OSStatus result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &device_);
+
+ if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) {
+ LOG(ERROR) << "Cannot open unified AudioDevice.";
+ return false;
+ }
+
+ // The requested sample-rate must match the hardware sample-rate.
+ Float64 sample_rate = 0.0;
+ size = sizeof(sample_rate);
+
+ pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+
+ result = AudioObjectGetPropertyData(
+ device_,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &sample_rate);
+
+ if (result != noErr || sample_rate != format_.mSampleRate) {
+ LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate
+ << " must match the hardware sample-rate: " << sample_rate;
+ return false;
+ }
+
+ // Configure buffer frame size.
+ UInt32 frame_size = number_of_frames_;
+
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioDevicePropertyScopeInput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectSetPropertyData(
+ device_,
+ &pa,
+ 0,
+ 0,
+ sizeof(frame_size),
+ &frame_size);
+
+ if (result != noErr) {
+ LOG(ERROR) << "Unable to set input buffer frame size: " << frame_size;
+ return false;
+ }
+
+ pa.mScope = kAudioDevicePropertyScopeOutput;
+ result = AudioObjectSetPropertyData(
+ device_,
+ &pa,
+ 0,
+ 0,
+ sizeof(frame_size),
+ &frame_size);
+
+ if (result != noErr) {
+ LOG(ERROR) << "Unable to set output buffer frame size: " << frame_size;
+ return false;
+ }
+
+ DVLOG(1) << "Sample rate: " << sample_rate;
+ DVLOG(1) << "Frame size: " << frame_size;
+
+ // Determine the number of input and output channels.
+ // We handle both the interleaved and non-interleaved cases.
+
+ // Get input stream configuration.
+ pa.mSelector = kAudioDevicePropertyStreamConfiguration;
+ pa.mScope = kAudioDevicePropertyScopeInput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+
+ result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr && size > 0) {
+ // Allocate storage.
+ scoped_ptr<uint8[]> input_list_storage(new uint8[size]);
+ AudioBufferList& input_list =
+ *reinterpret_cast<AudioBufferList*>(input_list_storage.get());
+
+ result = AudioObjectGetPropertyData(
+ device_,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &input_list);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr) {
+ // Determine number of input channels.
+ input_channels_per_frame_ = input_list.mNumberBuffers > 0 ?
+ input_list.mBuffers[0].mNumberChannels : 0;
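+      // Heuristic: several single-channel buffers indicate a planar
+      // (non-interleaved) layout; one buffer carrying all the channels
+      // indicates an interleaved layout.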
+ if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) {
+ // Non-interleaved.
+ input_channels_ = input_list.mNumberBuffers;
+ } else {
+ // Interleaved.
+ input_channels_ = input_channels_per_frame_;
+ }
+ }
+ }
+
+ DVLOG(1) << "Input channels: " << input_channels_;
+ DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_;
+
+ // The hardware must have at least the requested input channels.
+ if (result != noErr || client_input_channels_ > input_channels_) {
+ LOG(ERROR) << "AudioDevice does not support requested input channels.";
+ return false;
+ }
+
+ // Get output stream configuration.
+ pa.mSelector = kAudioDevicePropertyStreamConfiguration;
+ pa.mScope = kAudioDevicePropertyScopeOutput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+
+ result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr && size > 0) {
+ // Allocate storage.
+ scoped_ptr<uint8[]> output_list_storage(new uint8[size]);
+ AudioBufferList& output_list =
+ *reinterpret_cast<AudioBufferList*>(output_list_storage.get());
+
+ result = AudioObjectGetPropertyData(
+ device_,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &output_list);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr) {
+ // Determine number of output channels.
+      output_channels_per_frame_ = output_list.mNumberBuffers > 0 ?
+          output_list.mBuffers[0].mNumberChannels : 0;
+ if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) {
+ // Non-interleaved.
+ output_channels_ = output_list.mNumberBuffers;
+ } else {
+ // Interleaved.
+ output_channels_ = output_channels_per_frame_;
+ }
+ }
+ }
+
+ DVLOG(1) << "Output channels: " << output_channels_;
+ DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_;
+
+ // The hardware must have at least the requested output channels.
+ if (result != noErr ||
+ output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) {
+ LOG(ERROR) << "AudioDevice does not support requested output channels.";
+ return false;
+ }
+
+  // Set up the I/O proc.
+ result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error creating IOProc.";
+ return false;
+ }
+
+ return true;
+}
+
+void AudioHardwareUnifiedStream::Close() {
+ DCHECK(!is_playing_);
+
+ OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ io_proc_id_ = 0;
+ device_ = kAudioObjectUnknown;
+
+ // Inform the audio manager that we have been closed. This can cause our
+ // destruction.
+ manager_->ReleaseOutputStream(this);
+}
+
+void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) {
+ DCHECK(callback);
+ DCHECK_NE(device_, kAudioObjectUnknown);
+ DCHECK(!is_playing_);
+ if (device_ == kAudioObjectUnknown || is_playing_)
+ return;
+
+ source_ = callback;
+
+ OSStatus result = AudioDeviceStart(device_, io_proc_id_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr)
+ is_playing_ = true;
+}
+
+void AudioHardwareUnifiedStream::Stop() {
+ if (!is_playing_)
+ return;
+
+ if (device_ != kAudioObjectUnknown) {
+ OSStatus result = AudioDeviceStop(device_, io_proc_id_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ }
+
+ is_playing_ = false;
+ source_ = NULL;
+}
+
+void AudioHardwareUnifiedStream::SetVolume(double volume) {
+ volume_ = static_cast<float>(volume);
+ // TODO(crogers): set volume property
+}
+
+void AudioHardwareUnifiedStream::GetVolume(double* volume) {
+ *volume = volume_;
+}
+
+// Pulls on our provider with optional input, asking it to render output.
+// Note to future hackers of this function: do not add locks or blocking
+// calls here, because this runs on a real-time thread (for low latency).
+OSStatus AudioHardwareUnifiedStream::Render(
+ AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time) {
+ // Convert the input data accounting for possible interleaving.
+ // TODO(crogers): it's better to simply memcpy() if source is already planar.
+ if (input_channels_ >= client_input_channels_) {
+ for (int channel_index = 0; channel_index < client_input_channels_;
+ ++channel_index) {
+ float* source;
+
+ int source_channel_index = channel_index;
+
+ if (input_channels_per_frame_ > 1) {
+ // Interleaved.
+ source = static_cast<float*>(input_data->mBuffers[0].mData) +
+ source_channel_index;
+ } else {
+ // Non-interleaved.
+ source = static_cast<float*>(
+ input_data->mBuffers[source_channel_index].mData);
+ }
+
+ float* p = input_bus_->channel(channel_index);
+ for (int i = 0; i < number_of_frames_; ++i) {
+ p[i] = *source;
+ source += input_channels_per_frame_;
+ }
+ }
+ } else if (input_channels_) {
+ input_bus_->Zero();
+ }
+
+ // Give the client optional input data and have it render the output data.
+ source_->OnMoreIOData(input_bus_.get(),
+ output_bus_.get(),
+ AudioBuffersState(0, 0));
+
+ // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio.
+
+ // Handle interleaving as necessary.
+ // TODO(crogers): it's better to simply memcpy() if dest is already planar.
+
+ for (int channel_index = 0;
+ channel_index < static_cast<int>(format_.mChannelsPerFrame);
+ ++channel_index) {
+ float* dest;
+
+ int dest_channel_index = channel_index;
+
+ if (output_channels_per_frame_ > 1) {
+ // Interleaved.
+ dest = static_cast<float*>(output_data->mBuffers[0].mData) +
+ dest_channel_index;
+ } else {
+ // Non-interleaved.
+ dest = static_cast<float*>(
+ output_data->mBuffers[dest_channel_index].mData);
+ }
+
+ float* p = output_bus_->channel(channel_index);
+ for (int i = 0; i < number_of_frames_; ++i) {
+ *dest = p[i];
+ dest += output_channels_per_frame_;
+ }
+ }
+
+ return noErr;
+}
+
+OSStatus AudioHardwareUnifiedStream::RenderProc(
+ AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time,
+ void* user_data) {
+ AudioHardwareUnifiedStream* audio_output =
+ static_cast<AudioHardwareUnifiedStream*>(user_data);
+ DCHECK(audio_output);
+ if (!audio_output)
+ return -1;
+
+ return audio_output->Render(
+ device,
+ now,
+ input_data,
+ input_time,
+ output_data,
+ output_time);
+}
+
+} // namespace media
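
Open() above follows the standard CoreAudio property-query pattern: fill in an AudioObjectPropertyAddress, then call AudioObjectGetPropertyData() against the system object. A self-contained sketch of just the default-output-device lookup, with error handling reduced to returning kAudioDeviceUnknown (illustrative code, not the exact Chromium implementation):

#include <CoreAudio/CoreAudio.h>

static AudioDeviceID GetDefaultOutputDevice() {
  // Address of the "default output device" property on the system object.
  AudioObjectPropertyAddress pa = {
      kAudioHardwarePropertyDefaultOutputDevice,
      kAudioObjectPropertyScopeGlobal,
      kAudioObjectPropertyElementMaster};
  AudioDeviceID device = kAudioDeviceUnknown;
  UInt32 size = sizeof(device);
  OSStatus result = AudioObjectGetPropertyData(
      kAudioObjectSystemObject, &pa, 0, NULL, &size, &device);
  return (result == noErr) ? device : kAudioDeviceUnknown;
}
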
diff --git a/media/audio/mac/audio_unified_mac.h b/media/audio/mac/audio_unified_mac.h
new file mode 100644
index 0000000..ff090e3
--- /dev/null
+++ b/media/audio/mac/audio_unified_mac.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
+
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerMac;
+
+// Implementation of AudioOutputStream for Mac OS X using the CoreAudio
+// AudioHardware API, suitable for low-latency unified audio I/O on devices
+// which support *both* input and output in the same driver, as is the case
+// with professional USB and FireWire devices.
+//
+// Note that callers must first query the native sample-rate of the default
+// output device and use that sample-rate when creating this object.
+class AudioHardwareUnifiedStream : public AudioOutputStream {
+ public:
+  // The ctor takes all the usual parameters, plus |manager|, which is the
+  // audio manager that is creating this object.
+ AudioHardwareUnifiedStream(AudioManagerMac* manager,
+ const AudioParameters& params);
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioOutputStream::Close().
+ virtual ~AudioHardwareUnifiedStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ int input_channels() const { return input_channels_; }
+ int output_channels() const { return output_channels_; }
+
+ private:
+ OSStatus Render(AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time);
+
+ static OSStatus RenderProc(AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time,
+ void* user_data);
+
+  // Our creator; the audio manager needs to be notified when we close.
+ AudioManagerMac* manager_;
+
+ // Pointer to the object that will provide the audio samples.
+ AudioSourceCallback* source_;
+
+  // Structure that holds the stream format details such as sample-rate,
+  // bit depth, and channel count.
+ AudioStreamBasicDescription format_;
+
+  // Hardware buffer size in sample frames.
+ int number_of_frames_;
+
+ // Number of audio channels provided to the client via OnMoreIOData().
+ int client_input_channels_;
+
+ // Volume level from 0 to 1.
+ float volume_;
+
+ // Number of input and output channels queried from the hardware.
+ int input_channels_;
+ int output_channels_;
+ int input_channels_per_frame_;
+ int output_channels_per_frame_;
+
+ AudioDeviceIOProcID io_proc_id_;
+ AudioDeviceID device_;
+ bool is_playing_;
+
+ // Intermediate buffers used with call to OnMoreIOData().
+ scoped_ptr<AudioBus> input_bus_;
+ scoped_ptr<AudioBus> output_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioHardwareUnifiedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
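
The per-channel loops in Render() reduce to a strided copy: in an interleaved buffer, channel n occupies every channels_per_frame-th sample starting at offset n. A standalone sketch of the deinterleaving direction (the helper name and signature are hypothetical, not part of the class above):

// Hypothetical helper illustrating the strided copy performed in Render().
static void DeinterleaveChannel(const float* interleaved,
                                float* planar_dest,
                                int frames,
                                int channel_index,
                                int channels_per_frame) {
  // Start at this channel's offset and advance one full frame per sample.
  const float* source = interleaved + channel_index;
  for (int i = 0; i < frames; ++i) {
    planar_dest[i] = *source;
    source += channels_per_frame;
  }
}
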