summaryrefslogtreecommitdiffstats
path: root/media
diff options
context:
space:
mode:
authorcrogers@google.com <crogers@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2012-09-18 00:15:59 +0000
committercrogers@google.com <crogers@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2012-09-18 00:15:59 +0000
commita39b38afc2d3d96e86c4bdd81422eef012802625 (patch)
tree3a96011bcc445c56cf710e90681dabc291f8e396 /media
parent6927f2e012b2fa5b0df1cec1d62af2bfe5ffae34 (diff)
downloadchromium_src-a39b38afc2d3d96e86c4bdd81422eef012802625.zip
chromium_src-a39b38afc2d3d96e86c4bdd81422eef012802625.tar.gz
chromium_src-a39b38afc2d3d96e86c4bdd81422eef012802625.tar.bz2
Add Mac OS X synchronized audio I/O back-end
AudioSynchronizedStream is an implementation of AudioOutputStream for Mac OS X when using an input and output which are *not* unified in the same driver. This requires managing a separate input and output thread for each of the drivers and synchronizing them with a FIFO and varispeed. This synchronization involves two threads, and requires that the FIFO be made thread-safe in the sense that one thread may call AudioFifo::Push() while a second thread calls AudioFifo::Consume(). It is not acceptable to simply require the client to put locks around these calls because they can (and will) be contended (causing glitches) since both threads are real-time audio threads. BUG=none TEST=extensive manual testing on various machines, audio hardware, and input/output sample-rates R=scherkus Review URL: https://chromiumcodereview.appspot.com/10909185 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@157251 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r--media/audio/audio_output_resampler.cc17
-rw-r--r--media/audio/audio_output_resampler.h5
-rw-r--r--media/audio/mac/audio_manager_mac.cc57
-rw-r--r--media/audio/mac/audio_synchronized_mac.cc945
-rw-r--r--media/audio/mac/audio_synchronized_mac.h210
-rw-r--r--media/base/audio_bus.cc34
-rw-r--r--media/base/audio_bus.h11
-rw-r--r--media/base/audio_fifo.cc32
-rw-r--r--media/base/audio_fifo.h10
-rw-r--r--media/media.gyp2
10 files changed, 1303 insertions, 20 deletions
diff --git a/media/audio/audio_output_resampler.cc b/media/audio/audio_output_resampler.cc
index 14f29c9..87463c9 100644
--- a/media/audio/audio_output_resampler.cc
+++ b/media/audio/audio_output_resampler.cc
@@ -289,7 +289,7 @@ int AudioOutputResampler::OnMoreIOData(AudioBus* source,
if (!resampler_.get() && !audio_fifo_.get()) {
// We have no internal buffers, so clear any outstanding audio data.
outstanding_audio_bytes_ = 0;
- SourceCallback_Locked(dest);
+ SourceIOCallback_Locked(source, dest);
return dest->frames();
}
@@ -315,7 +315,12 @@ int AudioOutputResampler::OnMoreIOData(AudioBus* source,
return dest->frames();
}
-void AudioOutputResampler::SourceCallback_Locked(AudioBus* audio_bus) {
+void AudioOutputResampler::SourceCallback_Locked(AudioBus* dest) {
+ SourceIOCallback_Locked(NULL, dest);
+}
+
+void AudioOutputResampler::SourceIOCallback_Locked(
+ AudioBus* source, AudioBus* dest) {
source_lock_.AssertAcquired();
// Adjust playback delay to include the state of the internal buffers used by
@@ -326,14 +331,14 @@ void AudioOutputResampler::SourceCallback_Locked(AudioBus* audio_bus) {
(current_buffers_state_.total_bytes() + outstanding_audio_bytes_);
// Retrieve data from the original callback. Zero any unfilled frames.
- int frames = source_callback_->OnMoreData(audio_bus, new_buffers_state);
- if (frames < audio_bus->frames())
- audio_bus->ZeroFramesPartial(frames, audio_bus->frames() - frames);
+ int frames = source_callback_->OnMoreIOData(source, dest, new_buffers_state);
+ if (frames < dest->frames())
+ dest->ZeroFramesPartial(frames, dest->frames() - frames);
// Scale the number of frames we got back in terms of input bytes to output
// bytes accordingly.
outstanding_audio_bytes_ +=
- (audio_bus->frames() * params_.GetBytesPerFrame()) / io_ratio_;
+ (dest->frames() * params_.GetBytesPerFrame()) / io_ratio_;
}
void AudioOutputResampler::ProvideInput(AudioBus* audio_bus) {
diff --git a/media/audio/audio_output_resampler.h b/media/audio/audio_output_resampler.h
index cd2f1dd..3e6bd66 100644
--- a/media/audio/audio_output_resampler.h
+++ b/media/audio/audio_output_resampler.h
@@ -82,7 +82,10 @@ class MEDIA_EXPORT AudioOutputResampler
// Called by AudioPullFifo when more data is necessary. Requires
// |source_lock_| to have been acquired.
- void SourceCallback_Locked(AudioBus* audio_bus);
+ void SourceCallback_Locked(AudioBus* dest);
+
+ // Passes through |source| to the |source_callback_| OnMoreIOData() call.
+ void SourceIOCallback_Locked(AudioBus* source, AudioBus* dest);
// Used by StopStream()/CloseStream()/Shutdown() to clear internal state.
// TODO(dalecurtis): Probably only one of these methods needs to call this,
diff --git a/media/audio/mac/audio_manager_mac.cc b/media/audio/mac/audio_manager_mac.cc
index cb259ff..d9060f1 100644
--- a/media/audio/mac/audio_manager_mac.cc
+++ b/media/audio/mac/audio_manager_mac.cc
@@ -6,6 +6,7 @@
#include <string>
+#include "base/command_line.h"
#include "base/mac/mac_logging.h"
#include "base/mac/scoped_cftyperef.h"
#include "base/sys_string_conversions.h"
@@ -14,7 +15,10 @@
#include "media/audio/mac/audio_low_latency_output_mac.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/audio/mac/audio_output_mac.h"
+#include "media/audio/mac/audio_synchronized_mac.h"
+#include "media/audio/mac/audio_unified_mac.h"
#include "media/base/limits.h"
+#include "media/base/media_switches.h"
namespace media {
@@ -39,6 +43,45 @@ static bool HasAudioHardware(AudioObjectPropertySelector selector) {
output_device_id != kAudioObjectUnknown;
}
+// Returns true if the default input device is the same as
+// the default output device.
+static bool HasUnifiedDefaultIO() {
+ AudioDeviceID input_id, output_id;
+
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ pa.mScope = kAudioObjectPropertyScopeGlobal;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ UInt32 size = sizeof(input_id);
+
+ // Get the default input.
+ OSStatus result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &input_id);
+
+ if (result != noErr)
+ return false;
+
+ // Get the default output.
+ pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &output_id);
+
+ if (result != noErr)
+ return false;
+
+ return input_id == output_id;
+}
+
static void GetAudioDeviceInfo(bool is_input,
media::AudioDeviceNames* device_names) {
DCHECK(device_names);
@@ -250,6 +293,20 @@ AudioOutputStream* AudioManagerMac::MakeLinearOutputStream(
AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
const AudioParameters& params) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+
+ // TODO(crogers): remove once we properly handle input device selection.
+ if (CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableWebAudioInput)) {
+ if (HasUnifiedDefaultIO())
+ return new AudioHardwareUnifiedStream(this, params);
+
+ // kAudioDeviceUnknown translates to "use default" here.
+ return new AudioSynchronizedStream(this,
+ params,
+ kAudioDeviceUnknown,
+ kAudioDeviceUnknown);
+ }
+
return new AUAudioOutputStream(this, params);
}
diff --git a/media/audio/mac/audio_synchronized_mac.cc b/media/audio/mac/audio_synchronized_mac.cc
new file mode 100644
index 0000000..3861bcb
--- /dev/null
+++ b/media/audio/mac/audio_synchronized_mac.cc
@@ -0,0 +1,945 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_synchronized_mac.h"
+
+#include <CoreServices/CoreServices.h>
+#include <algorithm>
+
+#include "base/basictypes.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/mac/audio_manager_mac.h"
+
+namespace media {
+
+static const int kHardwareBufferSize = 128;
+static const int kFifoSize = 16384;
+
+// TODO(crogers): handle the non-stereo case.
+static const int kChannels = 2;
+
+// This value was determined empirically for minimum latency while still
+// guarding against FIFO under-runs.
+static const int kBaseTargetFifoFrames = 256 + 64;
+
+// If the input and output sample-rate don't match, then we need to maintain
+// an additional safety margin due to the callback timing jitter and the
+// varispeed buffering. This value was empirically tuned.
+static const int kAdditionalTargetFifoFrames = 128;
+
+static void ZeroBufferList(AudioBufferList* buffer_list) {
+ for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i)
+ memset(buffer_list->mBuffers[i].mData,
+ 0,
+ buffer_list->mBuffers[i].mDataByteSize);
+}
+
+static void WrapBufferList(AudioBufferList* buffer_list,
+ AudioBus* bus,
+ int frames) {
+ DCHECK(buffer_list);
+ DCHECK(bus);
+ int channels = bus->channels();
+ int buffer_list_channels = buffer_list->mNumberBuffers;
+
+ // Copy pointers from AudioBufferList.
+ int source_idx = 0;
+ for (int i = 0; i < channels; ++i) {
+ bus->SetChannelData(
+ i, static_cast<float*>(buffer_list->mBuffers[source_idx].mData));
+
+ // It's ok to pass in a |buffer_list| with fewer channels, in which
+ // case we just duplicate the last channel.
+ if (source_idx < buffer_list_channels - 1)
+ ++source_idx;
+ }
+
+ // Finally set the actual length.
+ bus->set_frames(frames);
+}
+
+AudioSynchronizedStream::AudioSynchronizedStream(
+ AudioManagerMac* manager,
+ const AudioParameters& params,
+ AudioDeviceID input_id,
+ AudioDeviceID output_id)
+ : manager_(manager),
+ params_(params),
+ input_sample_rate_(0),
+ output_sample_rate_(0),
+ input_id_(input_id),
+ output_id_(output_id),
+ input_buffer_list_(NULL),
+ fifo_(kChannels, kFifoSize),
+ target_fifo_frames_(kBaseTargetFifoFrames),
+ average_delta_(0.0),
+ fifo_rate_compensation_(1.0),
+ input_unit_(0),
+ varispeed_unit_(0),
+ output_unit_(0),
+ first_input_time_(-1),
+ is_running_(false),
+ hardware_buffer_size_(kHardwareBufferSize),
+ channels_(kChannels) {
+}
+
+AudioSynchronizedStream::~AudioSynchronizedStream() {
+ DCHECK(!input_unit_);
+ DCHECK(!output_unit_);
+ DCHECK(!varispeed_unit_);
+}
+
+bool AudioSynchronizedStream::Open() {
+ if (params_.channels() != kChannels) {
+ LOG(ERROR) << "Only stereo output is currently supported.";
+ return false;
+ }
+
+ // Create the input, output, and varispeed AudioUnits.
+ OSStatus result = CreateAudioUnits();
+ if (result != noErr) {
+ LOG(ERROR) << "Cannot create AudioUnits.";
+ return false;
+ }
+
+ result = SetupInput(input_id_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error configuring input AudioUnit.";
+ return false;
+ }
+
+ result = SetupOutput(output_id_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error configuring output AudioUnit.";
+ return false;
+ }
+
+ result = SetupCallbacks();
+ if (result != noErr) {
+ LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
+ return false;
+ }
+
+ result = SetupStreamFormats();
+ if (result != noErr) {
+ LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
+ return false;
+ }
+
+ AllocateInputData();
+
+ // Final initialization of the AudioUnits.
+ result = AudioUnitInitialize(input_unit_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error initializing input AudioUnit.";
+ return false;
+ }
+
+ result = AudioUnitInitialize(output_unit_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error initializing output AudioUnit.";
+ return false;
+ }
+
+ result = AudioUnitInitialize(varispeed_unit_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error initializing varispeed AudioUnit.";
+ return false;
+ }
+
+ if (input_sample_rate_ != output_sample_rate_) {
+ // Add extra safety margin.
+ target_fifo_frames_ += kAdditionalTargetFifoFrames;
+ }
+
+ // Buffer initial silence corresponding to target I/O buffering.
+ fifo_.Clear();
+ scoped_ptr<AudioBus> silence =
+ AudioBus::Create(channels_, target_fifo_frames_);
+ silence->Zero();
+ fifo_.Push(silence.get());
+
+ return true;
+}
+
+void AudioSynchronizedStream::Close() {
+ DCHECK(!is_running_);
+
+ if (input_buffer_list_) {
+ free(input_buffer_list_);
+ input_buffer_list_ = 0;
+ input_bus_.reset(NULL);
+ wrapper_bus_.reset(NULL);
+ }
+
+ if (input_unit_) {
+ AudioUnitUninitialize(input_unit_);
+ CloseComponent(input_unit_);
+ }
+
+ if (output_unit_) {
+ AudioUnitUninitialize(output_unit_);
+ CloseComponent(output_unit_);
+ }
+
+ if (varispeed_unit_) {
+ AudioUnitUninitialize(varispeed_unit_);
+ CloseComponent(varispeed_unit_);
+ }
+
+ input_unit_ = NULL;
+ output_unit_ = NULL;
+ varispeed_unit_ = NULL;
+
+ // Inform the audio manager that we have been closed. This can cause our
+ // destruction.
+ manager_->ReleaseOutputStream(this);
+}
+
+void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
+ DCHECK(callback);
+ DCHECK(input_unit_);
+ DCHECK(output_unit_);
+ DCHECK(varispeed_unit_);
+
+ if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
+ return;
+
+ source_ = callback;
+
+ // Reset state variables each time we Start().
+ fifo_rate_compensation_ = 1.0;
+ average_delta_ = 0.0;
+
+ OSStatus result = noErr;
+
+ if (!is_running_) {
+ first_input_time_ = -1;
+
+ result = AudioOutputUnitStart(input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr) {
+ result = AudioOutputUnitStart(output_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ }
+ }
+
+ is_running_ = true;
+}
+
+void AudioSynchronizedStream::Stop() {
+ OSStatus result = noErr;
+ if (is_running_) {
+ result = AudioOutputUnitStop(input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr) {
+ result = AudioOutputUnitStop(output_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ }
+ }
+
+ if (result == noErr)
+ is_running_ = false;
+}
+
+bool AudioSynchronizedStream::IsRunning() {
+ return is_running_;
+}
+
+// TODO(crogers): implement - or remove from AudioOutputStream.
+void AudioSynchronizedStream::SetVolume(double volume) {}
+void AudioSynchronizedStream::GetVolume(double* volume) {}
+
+OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
+ AudioDeviceID output_id) {
+ OSStatus result = noErr;
+
+ // Get the default output device if device is unknown.
+ if (output_id == kAudioDeviceUnknown) {
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ pa.mScope = kAudioObjectPropertyScopeGlobal;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ UInt32 size = sizeof(output_id);
+
+ result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &output_id);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+ }
+
+ // Set the render frame size.
+ UInt32 frame_size = hardware_buffer_size_;
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioDevicePropertyScopeInput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectSetPropertyData(
+ output_id,
+ &pa,
+ 0,
+ 0,
+ sizeof(frame_size),
+ &frame_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ output_info_.Initialize(output_id, false);
+
+ // Set the Current Device to the Default Output Unit.
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &output_info_.id_,
+ sizeof(output_info_.id_));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
+ AudioDeviceID input_id) {
+ OSStatus result = noErr;
+
+ // Get the default input device if device is unknown.
+ if (input_id == kAudioDeviceUnknown) {
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ pa.mScope = kAudioObjectPropertyScopeGlobal;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ UInt32 size = sizeof(input_id);
+
+ result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &input_id);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+ }
+
+ // Set the render frame size.
+ UInt32 frame_size = hardware_buffer_size_;
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioDevicePropertyScopeInput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectSetPropertyData(
+ input_id,
+ &pa,
+ 0,
+ 0,
+ sizeof(frame_size),
+ &frame_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ input_info_.Initialize(input_id, true);
+
+ // Set the Current Device to the AUHAL.
+ // This should be done only after I/O has been enabled on the AUHAL.
+ result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &input_info_.id_,
+ sizeof(input_info_.id_));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::CreateAudioUnits() {
+ // Q: Why do we need a varispeed unit?
+ // A: If the input device and the output device are running at
+ // different sample rates and/or on different clocks, we will need
+ // to compensate to avoid a pitch change and
+ // to avoid buffer under and over runs.
+ ComponentDescription varispeed_desc;
+ varispeed_desc.componentType = kAudioUnitType_FormatConverter;
+ varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
+ varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ varispeed_desc.componentFlags = 0;
+ varispeed_desc.componentFlagsMask = 0;
+
+ Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
+ if (varispeed_comp == NULL)
+ return -1;
+
+ OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Open input AudioUnit.
+ ComponentDescription input_desc;
+ input_desc.componentType = kAudioUnitType_Output;
+ input_desc.componentSubType = kAudioUnitSubType_HALOutput;
+ input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ input_desc.componentFlags = 0;
+ input_desc.componentFlagsMask = 0;
+
+ Component input_comp = FindNextComponent(NULL, &input_desc);
+ if (input_comp == NULL)
+ return -1;
+
+ result = OpenAComponent(input_comp, &input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Open output AudioUnit.
+ ComponentDescription output_desc;
+ output_desc.componentType = kAudioUnitType_Output;
+ output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
+ output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ output_desc.componentFlags = 0;
+ output_desc.componentFlagsMask = 0;
+
+ Component output_comp = FindNextComponent(NULL, &output_desc);
+ if (output_comp == NULL)
+ return -1;
+
+ result = OpenAComponent(output_comp, &output_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ return noErr;
+}
+
+OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
+ // The AUHAL used for input needs to be initialized
+ // before anything is done to it.
+ OSStatus result = AudioUnitInitialize(input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // We must enable the Audio Unit (AUHAL) for input and disable output
+ // BEFORE setting the AUHAL's current device.
+ result = EnableIO();
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ result = SetInputDeviceAsCurrent(input_id);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::EnableIO() {
+ // Enable input on the AUHAL.
+ UInt32 enable_io = 1;
+ OSStatus result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input,
+ 1, // input element
+ &enable_io,
+ sizeof(enable_io));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Disable Output on the AUHAL.
+ enable_io = 0;
+ result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ 0, // output element
+ &enable_io,
+ sizeof(enable_io));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
+ OSStatus result = noErr;
+
+ result = SetOutputDeviceAsCurrent(output_id);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Tell the output unit not to reset timestamps.
+ // Otherwise sample rate changes will cause sync loss.
+ UInt32 start_at_zero = 0;
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioOutputUnitProperty_StartTimestampsAtZero,
+ kAudioUnitScope_Global,
+ 0,
+ &start_at_zero,
+ sizeof(start_at_zero));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetupCallbacks() {
+ // Set the input callback.
+ AURenderCallbackStruct callback;
+ callback.inputProc = InputProc;
+ callback.inputProcRefCon = this;
+ OSStatus result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Global,
+ 0,
+ &callback,
+ sizeof(callback));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the output callback.
+ callback.inputProc = OutputProc;
+ callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input,
+ 0,
+ &callback,
+ sizeof(callback));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the varispeed callback.
+ callback.inputProc = VarispeedProc;
+ callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(
+ varispeed_unit_,
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input,
+ 0,
+ &callback,
+ sizeof(callback));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetupStreamFormats() {
+ AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;
+
+  // Get the Stream Format (input device side).
+ UInt32 property_size = sizeof(asbd_dev1_in);
+ OSStatus result = AudioUnitGetProperty(
+ input_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ 1,
+ &asbd_dev1_in,
+ &property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Get the Stream Format (client side).
+ property_size = sizeof(asbd);
+ result = AudioUnitGetProperty(
+ input_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 1,
+ &asbd,
+ &property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+  // Get the Stream Format (output device side).
+ property_size = sizeof(asbd_dev2_out);
+ result = AudioUnitGetProperty(
+ output_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 0,
+ &asbd_dev2_out,
+ &property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the format of all the AUs to the input/output devices channel count.
+ // For a simple case, you want to set this to
+ // the lower of count of the channels in the input device vs output device.
+ asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
+ asbd_dev2_out.mChannelsPerFrame);
+
+ // We must get the sample rate of the input device and set it to the
+ // stream format of AUHAL.
+ Float64 rate = 0;
+ property_size = sizeof(rate);
+
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectGetPropertyData(
+ input_info_.id_,
+ &pa,
+ 0,
+ 0,
+ &property_size,
+ &rate);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ input_sample_rate_ = rate;
+
+ asbd.mSampleRate = rate;
+ property_size = sizeof(asbd);
+
+ // Set the new formats to the AUs...
+ result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 1,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ result = AudioUnitSetProperty(
+ varispeed_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ 0,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the correct sample rate for the output device,
+ // but keep the channel count the same.
+ property_size = sizeof(rate);
+
+ pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectGetPropertyData(
+ output_info_.id_,
+ &pa,
+ 0,
+ 0,
+ &property_size,
+ &rate);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ output_sample_rate_ = rate;
+
+ // The requested sample-rate must match the hardware sample-rate.
+ if (output_sample_rate_ != params_.sample_rate()) {
+ LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
+ << " must match the hardware sample-rate: " << output_sample_rate_;
+ return kAudioDeviceUnsupportedFormatError;
+ }
+
+ asbd.mSampleRate = rate;
+ property_size = sizeof(asbd);
+
+ // Set the new audio stream formats for the rest of the AUs...
+ result = AudioUnitSetProperty(
+ varispeed_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 0,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ 0,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+void AudioSynchronizedStream::AllocateInputData() {
+ // Allocate storage for the AudioBufferList used for the
+ // input data from the input AudioUnit.
+  // We allocate enough space with one AudioBuffer per channel.
+ size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
+ (sizeof(AudioBuffer) * channels_);
+
+ input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
+ input_buffer_list_->mNumberBuffers = channels_;
+
+ input_bus_ = AudioBus::Create(channels_, hardware_buffer_size_);
+ wrapper_bus_ = AudioBus::CreateWrapper(channels_);
+
+ // Allocate buffers for AudioBufferList.
+ UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
+ for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
+ input_buffer_list_->mBuffers[i].mNumberChannels = 1;
+ input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
+ input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
+ }
+}
+
+OSStatus AudioSynchronizedStream::HandleInputCallback(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");
+
+ if (first_input_time_ < 0.0)
+ first_input_time_ = time_stamp->mSampleTime;
+
+ // Get the new audio input data.
+ OSStatus result = AudioUnitRender(
+ input_unit_,
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ input_buffer_list_);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Buffer input into FIFO.
+ int available_frames = fifo_.max_frames() - fifo_.frames();
+ if (input_bus_->frames() <= available_frames)
+ fifo_.Push(input_bus_.get());
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ // Create a wrapper bus on the AudioBufferList.
+ WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
+
+ if (fifo_.frames() < static_cast<int>(number_of_frames)) {
+ // We don't DCHECK here, since this is a possible run-time condition
+ // if the machine is bogged down.
+ wrapper_bus_->Zero();
+ return noErr;
+ }
+
+ // Read from the FIFO to feed the varispeed.
+ fifo_.Consume(wrapper_bus_.get(), 0, number_of_frames);
+
+ return noErr;
+}
+
+OSStatus AudioSynchronizedStream::HandleOutputCallback(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ if (first_input_time_ < 0.0) {
+ // Input callback hasn't run yet -> silence.
+ ZeroBufferList(io_data);
+ return noErr;
+ }
+
+ // Use the varispeed playback rate to offset small discrepancies
+ // in hardware clocks, and also any differences in sample-rate
+ // between input and output devices.
+
+ // Calculate a varispeed rate scalar factor to compensate for drift between
+ // input and output. We use the actual number of frames still in the FIFO
+ // compared with the ideal value of |target_fifo_frames_|.
+ int delta = fifo_.frames() - target_fifo_frames_;
+
+ // Average |delta| because it can jitter back/forth quite frequently
+ // by +/- the hardware buffer-size *if* the input and output callbacks are
+ // happening at almost exactly the same time. Also, if the input and output
+ // sample-rates are different then |delta| will jitter quite a bit due to
+ // the rate conversion happening in the varispeed, plus the jittering of
+ // the callbacks. The average value is what's important here.
+ average_delta_ += (delta - average_delta_) * 0.1;
+
+ // Compute a rate compensation which always attracts us back to the
+ // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
+ const double kCorrectionTimeSeconds = 0.1;
+ double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
+ fifo_rate_compensation_ =
+ (correction_time_frames + average_delta_) / correction_time_frames;
+
+ // Adjust for FIFO drift.
+ OSStatus result = AudioUnitSetParameter(
+ varispeed_unit_,
+ kVarispeedParam_PlaybackRate,
+ kAudioUnitScope_Global,
+ 0,
+ fifo_rate_compensation_,
+ 0);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Render to the output using the varispeed.
+ result = AudioUnitRender(
+ varispeed_unit_,
+ io_action_flags,
+ time_stamp,
+ 0,
+ number_of_frames,
+ io_data);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Create a wrapper bus on the AudioBufferList.
+ WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
+
+ // Process in-place!
+ source_->OnMoreIOData(wrapper_bus_.get(),
+ wrapper_bus_.get(),
+ AudioBuffersState(0, 0));
+
+ return noErr;
+}
+
+OSStatus AudioSynchronizedStream::InputProc(
+ void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ AudioSynchronizedStream* stream =
+ static_cast<AudioSynchronizedStream*>(user_data);
+ DCHECK(stream);
+
+ return stream->HandleInputCallback(
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ io_data);
+}
+
+OSStatus AudioSynchronizedStream::VarispeedProc(
+ void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ AudioSynchronizedStream* stream =
+ static_cast<AudioSynchronizedStream*>(user_data);
+ DCHECK(stream);
+
+ return stream->HandleVarispeedCallback(
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ io_data);
+}
+
+OSStatus AudioSynchronizedStream::OutputProc(
+ void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ AudioSynchronizedStream* stream =
+ static_cast<AudioSynchronizedStream*>(user_data);
+ DCHECK(stream);
+
+ return stream->HandleOutputCallback(
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ io_data);
+}
+
+void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
+ AudioDeviceID id, bool is_input) {
+ id_ = id;
+ is_input_ = is_input;
+ if (id_ == kAudioDeviceUnknown)
+ return;
+
+ UInt32 property_size = sizeof(buffer_size_frames_);
+
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ OSStatus result = AudioObjectGetPropertyData(
+ id_,
+ &pa,
+ 0,
+ 0,
+ &property_size,
+ &buffer_size_frames_);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+}
+
+} // namespace media
diff --git a/media/audio/mac/audio_synchronized_mac.h b/media/audio/mac/audio_synchronized_mac.h
new file mode 100644
index 0000000..e99d9c8
--- /dev/null
+++ b/media/audio/mac/audio_synchronized_mac.h
@@ -0,0 +1,210 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
+
+#include <AudioToolbox/AudioToolbox.h>
+#include <AudioUnit/AudioUnit.h>
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_fifo.h"
+
+namespace media {
+
+class AudioManagerMac;
+
+// AudioSynchronizedStream allows arbitrary combinations of input and output
+// devices running off different clocks and using different drivers, with
+// potentially differing sample-rates. It implements AudioOutputStream
+// and shuttles its synchronized I/O data using AudioSourceCallback.
+//
+// It is required to first acquire the native sample rate of the selected
+// output device and then use the same rate when creating this object.
+//
+// ............................................................................
+// Theory of Operation:
+// .
+// INPUT THREAD . OUTPUT THREAD
+// +-----------------+ +------+ .
+// | Input AudioUnit | --> | | .
+// +-----------------+ | | .
+// | FIFO | .
+// | | +-----------+
+// | | -----> | Varispeed |
+// | | +-----------+
+// +------+ . |
+// . | +-----------+
+// . OnMoreIOData() --> | Output AU |
+// . +-----------+
+//
+// The input AudioUnit's InputProc is called on one thread which feeds the
+// FIFO. The output AudioUnit's OutputProc is called on a second thread
+// which pulls on the varispeed to get the current input data. The varispeed
+// handles mismatches between input and output sample-rate and also clock drift
+// between the input and output drivers. The varispeed consumes its data from
+// the FIFO and adjusts its rate dynamically according to the amount
+// of data buffered in the FIFO. If the FIFO starts getting too much data
+// buffered then the varispeed will speed up slightly to compensate
+// and similarly if the FIFO doesn't have enough data buffered then the
+// varispeed will slow down slightly.
+//
+// Finally, once the input data is available then OnMoreIOData() is called
+// which is given this input, and renders the output which is finally sent
+// to the Output AudioUnit.
+class AudioSynchronizedStream : public AudioOutputStream {
+ public:
+  // The ctor takes all the usual parameters, plus |manager| which is
+  // the audio manager who is creating this object.
+  AudioSynchronizedStream(AudioManagerMac* manager,
+                          const AudioParameters& params,
+                          AudioDeviceID input_id,
+                          AudioDeviceID output_id);
+
+  virtual ~AudioSynchronizedStream();
+
+  // Implementation of AudioOutputStream.
+  virtual bool Open() OVERRIDE;
+  virtual void Close() OVERRIDE;
+  virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual void GetVolume(double* volume) OVERRIDE;
+
+  OSStatus SetInputDeviceAsCurrent(AudioDeviceID input_id);
+  OSStatus SetOutputDeviceAsCurrent(AudioDeviceID output_id);
+  AudioDeviceID GetInputDeviceID() { return input_info_.id_; }
+  AudioDeviceID GetOutputDeviceID() { return output_info_.id_; }
+
+  bool IsRunning();
+
+ private:
+  // Initialization.
+  OSStatus CreateAudioUnits();
+  OSStatus SetupInput(AudioDeviceID input_id);
+  OSStatus EnableIO();
+  OSStatus SetupOutput(AudioDeviceID output_id);
+  OSStatus SetupCallbacks();
+  OSStatus SetupStreamFormats();
+  void AllocateInputData();
+
+  // Handlers for the AudioUnit callbacks.
+  OSStatus HandleInputCallback(AudioUnitRenderActionFlags* io_action_flags,
+                               const AudioTimeStamp* time_stamp,
+                               UInt32 bus_number,
+                               UInt32 number_of_frames,
+                               AudioBufferList* io_data);
+
+  OSStatus HandleVarispeedCallback(AudioUnitRenderActionFlags* io_action_flags,
+                                   const AudioTimeStamp* time_stamp,
+                                   UInt32 bus_number,
+                                   UInt32 number_of_frames,
+                                   AudioBufferList* io_data);
+
+  OSStatus HandleOutputCallback(AudioUnitRenderActionFlags* io_action_flags,
+                                const AudioTimeStamp* time_stamp,
+                                UInt32 bus_number,
+                                UInt32 number_of_frames,
+                                AudioBufferList* io_data);
+
+  // AudioUnit callbacks.  Each is a static trampoline which casts
+  // |user_data| back to the stream and forwards to the Handle*() member.
+  static OSStatus InputProc(void* user_data,
+                            AudioUnitRenderActionFlags* io_action_flags,
+                            const AudioTimeStamp* time_stamp,
+                            UInt32 bus_number,
+                            UInt32 number_of_frames,
+                            AudioBufferList* io_data);
+
+  static OSStatus VarispeedProc(void* user_data,
+                                AudioUnitRenderActionFlags* io_action_flags,
+                                const AudioTimeStamp* time_stamp,
+                                UInt32 bus_number,
+                                UInt32 number_of_frames,
+                                AudioBufferList* io_data);
+
+  static OSStatus OutputProc(void* user_data,
+                             AudioUnitRenderActionFlags* io_action_flags,
+                             const AudioTimeStamp* time_stamp,
+                             UInt32 bus_number,
+                             UInt32 number_of_frames,
+                             AudioBufferList* io_data);
+
+  // Our creator.
+  AudioManagerMac* manager_;
+
+  // Client parameters.
+  AudioParameters params_;
+
+  double input_sample_rate_;
+  double output_sample_rate_;
+
+  // Pointer to the object that will provide the audio samples.
+  AudioSourceCallback* source_;
+
+  // Values used in Open().
+  AudioDeviceID input_id_;
+  AudioDeviceID output_id_;
+
+  // The input AudioUnit renders its data here.
+  AudioBufferList* input_buffer_list_;
+
+  // Holds the actual data for |input_buffer_list_|.
+  scoped_ptr<AudioBus> input_bus_;
+
+  // Used to overlay AudioBufferLists.
+  scoped_ptr<AudioBus> wrapper_bus_;
+
+  // Per-device bookkeeping: device ID, direction, and the device's I/O
+  // buffer size in frames (queried in Initialize()).
+  class AudioDeviceInfo {
+   public:
+    AudioDeviceInfo()
+        : id_(kAudioDeviceUnknown),
+          is_input_(false),
+          buffer_size_frames_(0) {}
+    void Initialize(AudioDeviceID id, bool is_input);
+    bool IsInitialized() const { return id_ != kAudioDeviceUnknown; }
+
+    AudioDeviceID id_;
+    bool is_input_;
+    UInt32 buffer_size_frames_;
+  };
+
+  AudioDeviceInfo input_info_;
+  AudioDeviceInfo output_info_;
+
+  // Used for input to output buffering.
+  AudioFifo fifo_;
+
+  // The optimal number of frames we'd like to keep in the FIFO at all times.
+  int target_fifo_frames_;
+
+  // A running average of the measured delta between actual number of frames
+  // in the FIFO versus |target_fifo_frames_|.
+  double average_delta_;
+
+  // A varispeed rate scalar which is calculated based on FIFO drift.
+  double fifo_rate_compensation_;
+
+  // AudioUnits.
+  AudioUnit input_unit_;
+  AudioUnit varispeed_unit_;
+  AudioUnit output_unit_;
+
+  // Time of the first input callback — presumably used to detect when
+  // input has started flowing; confirm semantics in the .cc.
+  double first_input_time_;
+
+  // Presumably true between Start() and Stop(); see IsRunning().
+  bool is_running_;
+  int hardware_buffer_size_;
+  int channels_;
+
+  DISALLOW_COPY_AND_ASSIGN(AudioSynchronizedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
diff --git a/media/base/audio_bus.cc b/media/base/audio_bus.cc
index 278fd93..9ff6ccd 100644
--- a/media/base/audio_bus.cc
+++ b/media/base/audio_bus.cc
@@ -105,7 +105,8 @@ static void CheckOverflow(int start_frame, int frames, int total_frames) {
}
AudioBus::AudioBus(int channels, int frames)
- : frames_(frames) {
+ : frames_(frames),
+ can_set_channel_data_(false) {
ValidateConfig(channels, frames_);
int aligned_frames = 0;
@@ -118,7 +119,8 @@ AudioBus::AudioBus(int channels, int frames)
}
AudioBus::AudioBus(int channels, int frames, float* data)
- : frames_(frames) {
+ : frames_(frames),
+ can_set_channel_data_(false) {
ValidateConfig(channels, frames_);
int aligned_frames = 0;
@@ -129,7 +131,8 @@ AudioBus::AudioBus(int channels, int frames, float* data)
AudioBus::AudioBus(int frames, const std::vector<float*>& channel_data)
: channel_data_(channel_data),
- frames_(frames) {
+ frames_(frames),
+ can_set_channel_data_(false) {
ValidateConfig(channel_data_.size(), frames_);
// Sanity check wrapped vector for alignment and channel count.
@@ -137,6 +140,14 @@ AudioBus::AudioBus(int frames, const std::vector<float*>& channel_data)
DCHECK(IsAligned(channel_data_[i]));
}
+// Private constructor backing CreateWrapper(): builds a zero-frame bus
+// whose channel pointers are supplied later via SetChannelData(), with the
+// frame count set via set_frames().  |can_set_channel_data_| gates those
+// setters to wrapper-created busses only.
+AudioBus::AudioBus(int channels)
+    : channel_data_(channels),
+      frames_(0),
+      can_set_channel_data_(true) {
+  // No memory is owned; every channel starts out unset.
+  for (size_t i = 0; i < channel_data_.size(); ++i)
+    channel_data_[i] = NULL;
+}
+
AudioBus::~AudioBus() {}
scoped_ptr<AudioBus> AudioBus::Create(int channels, int frames) {
@@ -148,6 +159,10 @@ scoped_ptr<AudioBus> AudioBus::Create(const AudioParameters& params) {
params.channels(), params.frames_per_buffer()));
}
+// Factory for a bus that wraps externally allocated memory; the caller must
+// populate it with SetChannelData() and set_frames() before use.
+scoped_ptr<AudioBus> AudioBus::CreateWrapper(int channels) {
+  return scoped_ptr<AudioBus>(new AudioBus(channels));
+}
+
scoped_ptr<AudioBus> AudioBus::WrapVector(
int frames, const std::vector<float*>& channel_data) {
return scoped_ptr<AudioBus>(new AudioBus(frames, channel_data));
@@ -170,6 +185,19 @@ scoped_ptr<AudioBus> AudioBus::WrapMemory(const AudioParameters& params,
static_cast<float*>(data)));
}
+// Installs an externally owned buffer as channel |channel|.  Only legal on
+// busses built via CreateWrapper(); the CHECKs fail loudly (even in release
+// builds) on misuse or an out-of-range channel index.
+void AudioBus::SetChannelData(int channel, float* data) {
+  CHECK(can_set_channel_data_);
+  CHECK_GE(channel, 0);
+  CHECK_LT(static_cast<size_t>(channel), channel_data_.size());
+  // Alignment is only DCHECKed: callers are trusted in release builds.
+  DCHECK(IsAligned(data));
+  channel_data_[channel] = data;
+}
+
+// Sets the frame count of a CreateWrapper()-built bus; |frames| should
+// match the length of the buffers passed to SetChannelData().
+// NOTE(review): a negative |frames| is not rejected here — TODO confirm
+// all callers pass non-negative values.
+void AudioBus::set_frames(int frames) {
+  CHECK(can_set_channel_data_);
+  frames_ = frames;
+}
+
void AudioBus::ZeroFramesPartial(int start_frame, int frames) {
CheckOverflow(start_frame, frames, frames_);
diff --git a/media/base/audio_bus.h b/media/base/audio_bus.h
index 161bccb..49bed82 100644
--- a/media/base/audio_bus.h
+++ b/media/base/audio_bus.h
@@ -30,6 +30,11 @@ class MEDIA_EXPORT AudioBus {
static scoped_ptr<AudioBus> Create(int channels, int frames);
static scoped_ptr<AudioBus> Create(const AudioParameters& params);
+ // Creates a new AudioBus with the given number of channels, but zero length.
+ // It's expected to be used with SetChannelData() and set_frames() to
+ // wrap externally allocated memory.
+ static scoped_ptr<AudioBus> CreateWrapper(int channels);
+
// Creates a new AudioBus from an existing channel vector. Does not transfer
// ownership of |channel_data| to AudioBus; i.e., |channel_data| must outlive
// the returned AudioBus. Each channel must be aligned by kChannelAlignment.
@@ -74,9 +79,11 @@ class MEDIA_EXPORT AudioBus {
// inf, nan, or between [-1.0, 1.0]) values in the channel data.
float* channel(int channel) { return channel_data_[channel]; }
const float* channel(int channel) const { return channel_data_[channel]; }
+ void SetChannelData(int channel, float* data);
int channels() const { return channel_data_.size(); }
int frames() const { return frames_; }
+ void set_frames(int frames);
// Helper method for zeroing out all channels of audio data.
void Zero();
@@ -90,6 +97,7 @@ class MEDIA_EXPORT AudioBus {
AudioBus(int channels, int frames);
AudioBus(int channels, int frames, float* data);
AudioBus(int frames, const std::vector<float*>& channel_data);
+ explicit AudioBus(int channels);
// Helper method for building |channel_data_| from a block of memory. |data|
// must be at least BlockSize() bytes in size.
@@ -101,6 +109,9 @@ class MEDIA_EXPORT AudioBus {
std::vector<float*> channel_data_;
int frames_;
+ // Protect SetChannelData() and set_frames() for use by CreateWrapper().
+ bool can_set_channel_data_;
+
DISALLOW_COPY_AND_ASSIGN(AudioBus);
};
diff --git a/media/base/audio_fifo.cc b/media/base/audio_fifo.cc
index 71085c0..b6e8f80 100644
--- a/media/base/audio_fifo.cc
+++ b/media/base/audio_fifo.cc
@@ -6,6 +6,9 @@
#include "base/logging.h"
+using base::subtle::Atomic32;
+using base::subtle::NoBarrier_Store;
+
namespace media {
// Given current position in the FIFO, the maximum number of elements in the
@@ -40,19 +43,26 @@ static int UpdatePos(int pos, int step, int max_size) {
AudioFifo::AudioFifo(int channels, int frames)
: audio_bus_(AudioBus::Create(channels, frames)),
max_frames_(frames),
- frames_(0),
+ frames_pushed_(0),
+ frames_consumed_(0),
read_pos_(0),
write_pos_(0) {}
AudioFifo::~AudioFifo() {}
+int AudioFifo::frames() const {
+  // Lock-free read: each counter has exactly one writer (Push() stores
+  // |frames_pushed_|, Consume() stores |frames_consumed_|), so their
+  // difference is a valid — at worst slightly stale — frame count.
+  int delta = frames_pushed_ - frames_consumed_;
+  // Pairs with the barrier in Push(): once an updated push count has been
+  // observed, the sample data written before it is visible too.
+  base::subtle::MemoryBarrier();
+  return delta;
+}
+
void AudioFifo::Push(const AudioBus* source) {
DCHECK(source);
DCHECK_EQ(source->channels(), audio_bus_->channels());
// Ensure that there is space for the new data in the FIFO.
const int source_size = source->frames();
- CHECK_LE(source_size + frames_, max_frames_);
+ CHECK_LE(source_size + frames(), max_frames_);
// Figure out if wrapping is needed and if so what segment sizes we need
// when adding the new audio bus content to the FIFO.
@@ -73,8 +83,13 @@ void AudioFifo::Push(const AudioBus* source) {
}
}
- frames_ += source_size;
- DCHECK_LE(frames_, max_frames());
+ // Ensure the data is *really* written before updating |frames_pushed_|.
+ base::subtle::MemoryBarrier();
+
+ Atomic32 new_frames_pushed = frames_pushed_ + source_size;
+ NoBarrier_Store(&frames_pushed_, new_frames_pushed);
+
+ DCHECK_LE(frames(), max_frames());
write_pos_ = UpdatePos(write_pos_, source_size, max_frames());
}
@@ -85,7 +100,7 @@ void AudioFifo::Consume(AudioBus* destination,
DCHECK_EQ(destination->channels(), audio_bus_->channels());
// It is not possible to ask for more data than what is available in the FIFO.
- CHECK_LE(frames_to_consume, frames_);
+ CHECK_LE(frames_to_consume, frames());
// A copy from the FIFO to |destination| will only be performed if the
// allocated memory in |destination| is sufficient.
@@ -113,12 +128,15 @@ void AudioFifo::Consume(AudioBus* destination,
}
}
- frames_ -= frames_to_consume;
+ Atomic32 new_frames_consumed = frames_consumed_ + frames_to_consume;
+ NoBarrier_Store(&frames_consumed_, new_frames_consumed);
+
read_pos_ = UpdatePos(read_pos_, frames_to_consume, max_frames());
}
 void AudioFifo::Clear() {
-  frames_ = 0;
+  // NOTE(review): these plain stores are not synchronized against a
+  // concurrent Push()/Consume(); only call Clear() when both audio threads
+  // are stopped — TODO confirm at call sites.
+  frames_pushed_ = 0;
+  frames_consumed_ = 0;
   read_pos_ = 0;
   write_pos_ = 0;
 }
diff --git a/media/base/audio_fifo.h b/media/base/audio_fifo.h
index 7f654b2..e978ace 100644
--- a/media/base/audio_fifo.h
+++ b/media/base/audio_fifo.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_BASE_AUDIO_FIFO_H_
#define MEDIA_BASE_AUDIO_FIFO_H_
+#include "base/atomicops.h"
#include "media/base/audio_bus.h"
#include "media/base/media_export.h"
@@ -14,6 +15,8 @@ namespace media {
// The maximum number of audio frames in the FIFO is set at construction and
// can not be extended dynamically. The allocated memory is utilized as a
// ring buffer.
+// This class is thread-safe in the limited sense that one thread may call
+// Push(), while a second thread calls Consume().
class MEDIA_EXPORT AudioFifo {
public:
// Creates a new AudioFifo and allocates |channels| of length |frames|.
@@ -35,11 +38,11 @@ class MEDIA_EXPORT AudioFifo {
void Clear();
// Number of actual audio frames in the FIFO.
- int frames() const { return frames_; }
+ int frames() const;
- private:
int max_frames() const { return max_frames_; }
+ private:
// The actual FIFO is an audio bus implemented as a ring buffer.
scoped_ptr<AudioBus> audio_bus_;
@@ -48,7 +51,8 @@ class MEDIA_EXPORT AudioFifo {
const int max_frames_;
// Number of actual elements in the FIFO.
- int frames_;
+ volatile base::subtle::Atomic32 frames_pushed_;
+ volatile base::subtle::Atomic32 frames_consumed_;
// Current read position.
int read_pos_;
diff --git a/media/media.gyp b/media/media.gyp
index c35a9f6..0d23796 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -117,6 +117,8 @@
'audio/mac/audio_manager_mac.h',
'audio/mac/audio_output_mac.cc',
'audio/mac/audio_output_mac.h',
+ 'audio/mac/audio_synchronized_mac.cc',
+ 'audio/mac/audio_synchronized_mac.h',
'audio/mac/audio_unified_mac.cc',
'audio/mac/audio_unified_mac.h',
'audio/null_audio_sink.cc',