author    skobes@google.com <skobes@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2014-02-07 22:08:05 +0000
committer skobes@google.com <skobes@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2014-02-07 22:08:05 +0000
commit    ab2066f6062f3acec61bce9b2cb52910549d051d (patch)
tree      a7120570dff5c48844cb0b312cf362f5ab41666e /media/audio
parent    0cfa96c53fc8f28c5c60c950fb278db89a05d9ad (diff)
Revert 249790 "Remove the unified IO code on the browser."
http://build.chromium.org/p/chromium.chromiumos/builders/ChromiumOS%20%28amd64%29/builds/14117

chromeos-chrome-34.0.1829.0_alpha-r1: ../../../../../../../home/chrome-bot/chrome_root/src/media/audio/linux/audio_manager_linux.cc: In function 'media::AudioManager* media::CreateAudioManager(media::AudioLogFactory*)':
chromeos-chrome-34.0.1829.0_alpha-r1: ../../../../../../../home/chrome-bot/chrome_root/src/media/audio/linux/audio_manager_linux.cc:33:50: error: cannot allocate an object of abstract type 'media::AudioManagerCras'
chromeos-chrome-34.0.1829.0_alpha-r1:    return new AudioManagerCras(audio_log_factory);
chromeos-chrome-34.0.1829.0_alpha-r1:                                                  ^

> Remove the unified IO code on the browser.
>
> Unified IO is not used any more and it should be removed.
>
> BUG=337096
> TEST=bots, and nothing breaks.
>
> Review URL: https://codereview.chromium.org/153623004

TBR=xians@chromium.org

Review URL: https://codereview.chromium.org/136233005

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@249811 0039d316-1c4b-4281-b951-d872f2087c98
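The failure behind this revert is the standard abstract-type break: r249790 narrowed the pure virtual MakeLowLatencyOutputStream() in AudioManagerBase from three parameters to two, but AudioManagerCras still defined only the old three-parameter overload, so the new pure virtual was never overridden and the class became abstract. A minimal sketch of the mechanism, with stand-in types rather than the real Chromium classes:

    #include <string>

    class AudioParameters;
    class AudioOutputStream;

    struct AudioManagerBase {
      virtual ~AudioManagerBase() {}
      // After r249790 the pure virtual lost its |input_device_id| parameter.
      virtual AudioOutputStream* MakeLowLatencyOutputStream(
          const AudioParameters& params, const std::string& device_id) = 0;
    };

    struct AudioManagerCras : public AudioManagerBase {
      // Still defines only the old three-argument version; the two-argument
      // pure virtual above stays unimplemented, so the class is abstract.
      virtual AudioOutputStream* MakeLowLatencyOutputStream(
          const AudioParameters& params, const std::string& device_id,
          const std::string& input_device_id) { return 0; }
    };

    AudioManagerBase* CreateAudioManager() {
      // error: cannot allocate an object of abstract type 'AudioManagerCras'
      return new AudioManagerCras();
    }

Restoring the three-argument signature everywhere, as this revert does, makes AudioManagerCras concrete again without touching the CrOS-specific files.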
Diffstat (limited to 'media/audio')
-rw-r--r--media/audio/alsa/alsa_output_unittest.cc5
-rw-r--r--media/audio/alsa/audio_manager_alsa.cc4
-rw-r--r--media/audio/alsa/audio_manager_alsa.h3
-rw-r--r--media/audio/android/audio_android_unittest.cc12
-rw-r--r--media/audio/android/audio_manager_android.cc9
-rw-r--r--media/audio/android/audio_manager_android.h6
-rw-r--r--media/audio/audio_logging.h8
-rw-r--r--media/audio/audio_low_latency_input_output_unittest.cc3
-rw-r--r--media/audio/audio_manager.h11
-rw-r--r--media/audio/audio_manager_base.cc31
-rw-r--r--media/audio/audio_manager_base.h10
-rw-r--r--media/audio/audio_output_controller.cc9
-rw-r--r--media/audio/audio_output_controller.h10
-rw-r--r--media/audio/audio_output_controller_unittest.cc2
-rw-r--r--media/audio/audio_output_dispatcher.cc6
-rw-r--r--media/audio/audio_output_dispatcher.h9
-rw-r--r--media/audio/audio_output_dispatcher_impl.cc8
-rw-r--r--media/audio/audio_output_dispatcher_impl.h1
-rw-r--r--media/audio/audio_output_proxy_unittest.cc53
-rw-r--r--media/audio/audio_output_resampler.cc9
-rw-r--r--media/audio/audio_output_resampler.h1
-rw-r--r--media/audio/cras/cras_unified_unittest.cc5
-rw-r--r--media/audio/fake_audio_log_factory.cc3
-rw-r--r--media/audio/fake_audio_manager.cc3
-rw-r--r--media/audio/fake_audio_manager.h3
-rw-r--r--media/audio/mac/aggregate_device_manager.cc371
-rw-r--r--media/audio/mac/aggregate_device_manager.h58
-rw-r--r--media/audio/mac/audio_auhal_mac_unittest.cc2
-rw-r--r--media/audio/mac/audio_manager_mac.cc78
-rw-r--r--media/audio/mac/audio_manager_mac.h8
-rw-r--r--media/audio/mac/audio_synchronized_mac.cc976
-rw-r--r--media/audio/mac/audio_synchronized_mac.h216
-rw-r--r--media/audio/mac/audio_unified_mac.cc397
-rw-r--r--media/audio/mac/audio_unified_mac.h100
-rw-r--r--media/audio/mock_audio_manager.cc6
-rw-r--r--media/audio/mock_audio_manager.h6
-rw-r--r--media/audio/openbsd/audio_manager_openbsd.cc3
-rw-r--r--media/audio/openbsd/audio_manager_openbsd.h3
-rw-r--r--media/audio/pulse/audio_manager_pulse.cc14
-rw-r--r--media/audio/pulse/audio_manager_pulse.h6
-rw-r--r--media/audio/pulse/pulse_unified.cc292
-rw-r--r--media/audio/pulse/pulse_unified.h90
-rw-r--r--media/audio/sounds/audio_stream_handler.cc2
-rw-r--r--media/audio/win/audio_low_latency_output_win_unittest.cc2
-rw-r--r--media/audio/win/audio_manager_win.cc13
-rw-r--r--media/audio/win/audio_manager_win.h3
-rw-r--r--media/audio/win/audio_output_win_unittest.cc40
-rw-r--r--media/audio/win/audio_unified_win.cc984
-rw-r--r--media/audio/win/audio_unified_win.h352
-rw-r--r--media/audio/win/audio_unified_win_unittest.cc356
50 files changed, 4478 insertions, 124 deletions
diff --git a/media/audio/alsa/alsa_output_unittest.cc b/media/audio/alsa/alsa_output_unittest.cc
index 8b0aeae..07ce8ea 100644
--- a/media/audio/alsa/alsa_output_unittest.cc
+++ b/media/audio/alsa/alsa_output_unittest.cc
@@ -75,9 +75,10 @@ class MockAudioManagerAlsa : public AudioManagerAlsa {
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
+ MOCK_METHOD3(MakeLowLatencyOutputStream, AudioOutputStream*(
const AudioParameters& params,
- const std::string& device_id));
+ const std::string& device_id,
+ const std::string& input_device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
diff --git a/media/audio/alsa/audio_manager_alsa.cc b/media/audio/alsa/audio_manager_alsa.cc
index 7d6421f..ac61a5f 100644
--- a/media/audio/alsa/audio_manager_alsa.cc
+++ b/media/audio/alsa/audio_manager_alsa.cc
@@ -283,9 +283,11 @@ AudioOutputStream* AudioManagerAlsa::MakeLinearOutputStream(
AudioOutputStream* AudioManagerAlsa::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ // TODO(xians): Use input_device_id for unified IO.
return MakeOutputStream(params);
}
diff --git a/media/audio/alsa/audio_manager_alsa.h b/media/audio/alsa/audio_manager_alsa.h
index d08c3ba..155089f 100644
--- a/media/audio/alsa/audio_manager_alsa.h
+++ b/media/audio/alsa/audio_manager_alsa.h
@@ -37,7 +37,8 @@ class MEDIA_EXPORT AudioManagerAlsa : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
diff --git a/media/audio/android/audio_android_unittest.cc b/media/audio/android/audio_android_unittest.cc
index a8d66a6..91bf5d8 100644
--- a/media/audio/android/audio_android_unittest.cc
+++ b/media/audio/android/audio_android_unittest.cc
@@ -438,7 +438,7 @@ class AudioAndroidOutputTest : public testing::Test {
const int num_callbacks =
(kCallbackTestTimeMs / expected_time_between_callbacks_ms);
AudioOutputStream* stream = audio_manager()->MakeAudioOutputStream(
- params, std::string());
+ params, std::string(), std::string());
EXPECT_TRUE(stream);
int count = 0;
@@ -621,7 +621,7 @@ TEST_P(AudioAndroidInputTest, CreateAndCloseInputStream) {
TEST_F(AudioAndroidOutputTest, CreateAndCloseOutputStream) {
AudioParameters params = GetDefaultOutputStreamParameters();
AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- params, std::string());
+ params, std::string(), std::string());
EXPECT_TRUE(aos);
aos->Close();
}
@@ -640,7 +640,7 @@ TEST_P(AudioAndroidInputTest, OpenAndCloseInputStream) {
TEST_F(AudioAndroidOutputTest, OpenAndCloseOutputStream) {
AudioParameters params = GetDefaultOutputStreamParameters();
AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- params, std::string());
+ params, std::string(), std::string());
EXPECT_TRUE(aos);
EXPECT_TRUE(aos->Open());
aos->Close();
@@ -701,7 +701,7 @@ TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
AudioParameters params = GetDefaultOutputStreamParameters();
VLOG(1) << params;
AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- params, std::string());
+ params, std::string(), std::string());
EXPECT_TRUE(aos);
std::string file_name;
@@ -770,7 +770,7 @@ TEST_P(AudioAndroidInputTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
AudioParameters out_params =
audio_manager()->GetDefaultOutputStreamParameters();
AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- out_params, std::string());
+ out_params, std::string(), std::string());
EXPECT_TRUE(aos);
std::string file_name = base::StringPrintf("out_duplex_%d_%d_%d.pcm",
@@ -829,7 +829,7 @@ TEST_P(AudioAndroidInputTest,
io_params, AudioManagerBase::kDefaultDeviceId);
EXPECT_TRUE(ais);
AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- io_params, std::string());
+ io_params, std::string(), std::string());
EXPECT_TRUE(aos);
FullDuplexAudioSinkSource full_duplex(io_params);
diff --git a/media/audio/android/audio_manager_android.cc b/media/audio/android/audio_manager_android.cc
index a8d81c7..33e9e08 100644
--- a/media/audio/android/audio_manager_android.cc
+++ b/media/audio/android/audio_manager_android.cc
@@ -122,10 +122,12 @@ AudioParameters AudioManagerAndroid::GetInputStreamParameters(
AudioOutputStream* AudioManagerAndroid::MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
bool had_no_streams = HadNoAudioStreams();
AudioOutputStream* stream =
- AudioManagerBase::MakeAudioOutputStream(params, std::string());
+ AudioManagerBase::MakeAudioOutputStream(params, std::string(),
+ std::string());
// The audio manager for Android creates streams intended for real-time
// VoIP sessions and therefore sets the audio mode to MODE_IN_COMMUNICATION.
@@ -185,7 +187,8 @@ AudioOutputStream* AudioManagerAndroid::MakeLinearOutputStream(
AudioOutputStream* AudioManagerAndroid::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
return new OpenSLESOutputStream(this, params);
diff --git a/media/audio/android/audio_manager_android.h b/media/audio/android/audio_manager_android.h
index ac67d08..670c094 100644
--- a/media/audio/android/audio_manager_android.h
+++ b/media/audio/android/audio_manager_android.h
@@ -33,7 +33,8 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeAudioInputStream(
const AudioParameters& params,
const std::string& device_id) OVERRIDE;
@@ -45,7 +46,8 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params,
const std::string& device_id) OVERRIDE;
diff --git a/media/audio/audio_logging.h b/media/audio/audio_logging.h
index 913b8ec..1d8366b 100644
--- a/media/audio/audio_logging.h
+++ b/media/audio/audio_logging.h
@@ -20,11 +20,13 @@ class AudioLog {
virtual ~AudioLog() {}
// Called when an audio component is created. |params| are the parameters of
- // the created stream. |device_id| is the id of the audio device opened by
- // the created stream.
+ // the created stream. |input_device_id| and |output_device_id| are the
+ // respective device ids for input and output. Either one or both may be
+ // specified.
virtual void OnCreated(int component_id,
const media::AudioParameters& params,
- const std::string& device_id) = 0;
+ const std::string& input_device_id,
+ const std::string& output_device_id) = 0;
// Called when an audio component is started, generally this is synonymous
// with "playing."
diff --git a/media/audio/audio_low_latency_input_output_unittest.cc b/media/audio/audio_low_latency_input_output_unittest.cc
index df94a3f..0fff86c 100644
--- a/media/audio/audio_low_latency_input_output_unittest.cc
+++ b/media/audio/audio_low_latency_input_output_unittest.cc
@@ -312,7 +312,8 @@ class AudioOutputStreamTraits {
static StreamType* CreateStream(AudioManager* audio_manager,
const AudioParameters& params) {
- return audio_manager->MakeAudioOutputStream(params, std::string());
+ return audio_manager->MakeAudioOutputStream(params, std::string(),
+ std::string());
}
};
diff --git a/media/audio/audio_manager.h b/media/audio/audio_manager.h
index a15dab5..04e89a6 100644
--- a/media/audio/audio_manager.h
+++ b/media/audio/audio_manager.h
@@ -88,6 +88,11 @@ class MEDIA_EXPORT AudioManager {
// To create a stream for the default output device, pass an empty string
// for |device_id|, otherwise the specified audio device will be opened.
//
+ // The |input_device_id| is used for low-latency unified streams
+ // (input+output) only and then only if the audio parameters specify a >0
+ // input channel count. In other cases this id is ignored and should be
+ // empty.
+ //
// Returns NULL if the combination of the parameters is not supported, or if
// we have reached some other platform specific limit.
//
@@ -100,7 +105,8 @@ class MEDIA_EXPORT AudioManager {
// Do not free the returned AudioOutputStream. It is owned by AudioManager.
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id) = 0;
+ const std::string& device_id,
+ const std::string& input_device_id) = 0;
// Creates new audio output proxy. A proxy implements
// AudioOutputStream interface, but unlike regular output stream
@@ -108,7 +114,8 @@ class MEDIA_EXPORT AudioManager {
// sound is actually playing.
virtual AudioOutputStream* MakeAudioOutputStreamProxy(
const AudioParameters& params,
- const std::string& device_id) = 0;
+ const std::string& device_id,
+ const std::string& input_device_id) = 0;
// Factory to create audio recording streams.
// |channels| can be 1 or 2.
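A sketch of the calling convention described above, assuming this era's six-argument AudioParameters constructor (format, channel layout, input channels, sample rate, bits per sample, frames per buffer) and an |audio_manager| pointer obtained elsewhere:

    // Unified request: input_channels > 0, so |input_device_id| is honored.
    media::AudioParameters params(
        media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
        media::CHANNEL_LAYOUT_STEREO,
        2,  // input channels: selects the unified IO path
        48000, 16, 256);
    media::AudioOutputStream* unified = audio_manager->MakeAudioOutputStream(
        params,
        std::string(),  // empty |device_id|: default output device
        media::AudioManagerBase::kDefaultDeviceId);

    // Output-only request: |input_device_id| is ignored and left empty.
    media::AudioOutputStream* output_only =
        audio_manager->MakeAudioOutputStream(
            output_params, std::string(), std::string());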
diff --git a/media/audio/audio_manager_base.cc b/media/audio/audio_manager_base.cc
index f9dcafe..f4cd60e 100644
--- a/media/audio/audio_manager_base.cc
+++ b/media/audio/audio_manager_base.cc
@@ -37,13 +37,17 @@ const char AudioManagerBase::kLoopbackInputDeviceId[] = "loopback";
struct AudioManagerBase::DispatcherParams {
DispatcherParams(const AudioParameters& input,
const AudioParameters& output,
- const std::string& output_device_id)
+ const std::string& output_device_id,
+ const std::string& input_device_id)
: input_params(input),
- output_params(output) {}
+ output_params(output),
+ input_device_id(input_device_id),
+ output_device_id(output_device_id) {}
~DispatcherParams() {}
const AudioParameters input_params;
const AudioParameters output_params;
+ const std::string input_device_id;
const std::string output_device_id;
scoped_refptr<AudioOutputDispatcher> dispatcher;
@@ -59,11 +63,13 @@ class AudioManagerBase::CompareByParams {
// We will reuse the existing dispatcher when:
// 1) Unified IO is not used, input_params and output_params of the
// existing dispatcher are the same as the requested dispatcher.
- // 2) Unified IO is used, input_params and output_params of the existing
- // dispatcher are the same as the request dispatcher.
+ // 2) Unified IO is used, input_params, output_params and input_device_id
+ // of the existing dispatcher are the same as the requested dispatcher.
return (dispatcher_->input_params == dispatcher_in->input_params &&
dispatcher_->output_params == dispatcher_in->output_params &&
- dispatcher_->output_device_id == dispatcher_in->output_device_id);
+ dispatcher_->output_device_id == dispatcher_in->output_device_id &&
+ (!dispatcher_->input_params.input_channels() ||
+ dispatcher_->input_device_id == dispatcher_in->input_device_id));
}
private:
@@ -133,7 +139,8 @@ AudioManagerBase::GetWorkerTaskRunner() {
AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
// TODO(miu): Fix ~50 call points across several unit test modules to call
// this method on the audio thread, then uncomment the following:
// DCHECK(task_runner_->BelongsToCurrentThread());
@@ -163,7 +170,7 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
stream = MakeLinearOutputStream(params);
break;
case AudioParameters::AUDIO_PCM_LOW_LATENCY:
- stream = MakeLowLatencyOutputStream(params, device_id);
+ stream = MakeLowLatencyOutputStream(params, device_id, input_device_id);
break;
case AudioParameters::AUDIO_FAKE:
stream = FakeAudioOutputStream::MakeFakeStream(this, params);
@@ -225,7 +232,8 @@ AudioInputStream* AudioManagerBase::MakeAudioInputStream(
AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
const AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
DCHECK(task_runner_->BelongsToCurrentThread());
// If the caller supplied an empty device id to select the default device,
@@ -265,7 +273,8 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
}
DispatcherParams* dispatcher_params =
- new DispatcherParams(params, output_params, output_device_id);
+ new DispatcherParams(params, output_params, output_device_id,
+ input_device_id);
AudioOutputDispatchers::iterator it =
std::find_if(output_dispatchers_.begin(), output_dispatchers_.end(),
@@ -280,12 +289,12 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
scoped_refptr<AudioOutputDispatcher> dispatcher;
if (output_params.format() != AudioParameters::AUDIO_FAKE) {
dispatcher = new AudioOutputResampler(this, params, output_params,
- output_device_id,
+ output_device_id, input_device_id,
kCloseDelay);
} else {
dispatcher = new AudioOutputDispatcherImpl(this, output_params,
output_device_id,
- kCloseDelay);
+ input_device_id, kCloseDelay);
}
dispatcher_params->dispatcher = dispatcher;
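The CompareByParams change above widens the dispatcher cache key with |input_device_id| only when the parameters actually carry input channels; output-only streams keep their old reuse behavior. The predicate, restated as a standalone sketch:

    // Sketch: when may an existing dispatcher be reused for a new request?
    bool CanReuse(const DispatcherParams& cached,
                  const DispatcherParams& requested) {
      return cached.input_params == requested.input_params &&
             cached.output_params == requested.output_params &&
             cached.output_device_id == requested.output_device_id &&
             // Only unified streams (input_channels() > 0) are keyed on the
             // input device; for output-only streams the id is irrelevant.
             (!cached.input_params.input_channels() ||
              cached.input_device_id == requested.input_device_id);
    }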
diff --git a/media/audio/audio_manager_base.h b/media/audio/audio_manager_base.h
index 4c088fb..e1ec49b 100644
--- a/media/audio/audio_manager_base.h
+++ b/media/audio/audio_manager_base.h
@@ -64,14 +64,16 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeAudioInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioOutputStream* MakeAudioOutputStreamProxy(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
// Called internally by the audio stream when it has been closed.
virtual void ReleaseOutputStream(AudioOutputStream* stream);
@@ -83,9 +85,11 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
const AudioParameters& params) = 0;
// Creates the output stream for the |AUDIO_PCM_LOW_LATENCY| format.
+ // |input_device_id| is used by unified IO to open the correct input device.
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) = 0;
+ const std::string& device_id,
+ const std::string& input_device_id) = 0;
// Creates the input stream for the |AUDIO_PCM_LINEAR| format. The legacy
// name is also from |AUDIO_PCM_LINEAR|.
diff --git a/media/audio/audio_output_controller.cc b/media/audio/audio_output_controller.cc
index 405a02f..0d9b0e8 100644
--- a/media/audio/audio_output_controller.cc
+++ b/media/audio/audio_output_controller.cc
@@ -36,11 +36,13 @@ AudioOutputController::AudioOutputController(
EventHandler* handler,
const AudioParameters& params,
const std::string& output_device_id,
+ const std::string& input_device_id,
SyncReader* sync_reader)
: audio_manager_(audio_manager),
params_(params),
handler_(handler),
output_device_id_(output_device_id),
+ input_device_id_(input_device_id),
stream_(NULL),
diverting_to_stream_(NULL),
volume_(1.0),
@@ -70,6 +72,7 @@ scoped_refptr<AudioOutputController> AudioOutputController::Create(
EventHandler* event_handler,
const AudioParameters& params,
const std::string& output_device_id,
+ const std::string& input_device_id,
SyncReader* sync_reader) {
DCHECK(audio_manager);
DCHECK(sync_reader);
@@ -78,7 +81,8 @@ scoped_refptr<AudioOutputController> AudioOutputController::Create(
return NULL;
scoped_refptr<AudioOutputController> controller(new AudioOutputController(
- audio_manager, event_handler, params, output_device_id, sync_reader));
+ audio_manager, event_handler, params, output_device_id, input_device_id,
+ sync_reader));
controller->message_loop_->PostTask(FROM_HERE, base::Bind(
&AudioOutputController::DoCreate, controller, false));
return controller;
@@ -137,7 +141,8 @@ void AudioOutputController::DoCreate(bool is_for_device_change) {
stream_ = diverting_to_stream_ ?
diverting_to_stream_ :
- audio_manager_->MakeAudioOutputStreamProxy(params_, output_device_id_);
+ audio_manager_->MakeAudioOutputStreamProxy(params_, output_device_id_,
+ input_device_id_);
if (!stream_) {
state_ = kError;
handler_->OnError();
diff --git a/media/audio/audio_output_controller.h b/media/audio/audio_output_controller.h
index 1a250f9..121b446 100644
--- a/media/audio/audio_output_controller.h
+++ b/media/audio/audio_output_controller.h
@@ -107,11 +107,13 @@ class MEDIA_EXPORT AudioOutputController
// OnCreated() call from the same audio manager thread. |audio_manager| must
// outlive AudioOutputController.
// The |output_device_id| can be either empty (default device) or specify a
- // specific hardware device for audio output.
+ // specific hardware device for audio output. The |input_device_id| is
+ // used only for unified audio when opening up input and output at the same
+ // time (controlled by |params.input_channel_count()|).
static scoped_refptr<AudioOutputController> Create(
AudioManager* audio_manager, EventHandler* event_handler,
const AudioParameters& params, const std::string& output_device_id,
- SyncReader* sync_reader);
+ const std::string& input_device_id, SyncReader* sync_reader);
// Methods to control playback of the stream.
@@ -191,6 +193,7 @@ class MEDIA_EXPORT AudioOutputController
AudioOutputController(AudioManager* audio_manager, EventHandler* handler,
const AudioParameters& params,
const std::string& output_device_id,
+ const std::string& input_device_id,
SyncReader* sync_reader);
// The following methods are executed on the audio manager thread.
@@ -231,6 +234,9 @@ class MEDIA_EXPORT AudioOutputController
// default output device.
std::string output_device_id_;
+ // Used by the unified IO to open the correct input device.
+ const std::string input_device_id_;
+
AudioOutputStream* stream_;
// When non-NULL, audio is being diverted to this stream.
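The widened factory shows up at every Create() call site. An output-only sketch, mirroring the unit-test change below (the handler and reader instances are assumed to exist):

    scoped_refptr<media::AudioOutputController> controller =
        media::AudioOutputController::Create(
            audio_manager,
            &event_handler,  // EventHandler implementation (assumed)
            params,
            std::string(),   // output_device_id: default output device
            std::string(),   // input_device_id: empty for output-only use
            &sync_reader);   // SyncReader implementation (assumed)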
diff --git a/media/audio/audio_output_controller_unittest.cc b/media/audio/audio_output_controller_unittest.cc
index b4270e6..288acd9 100644
--- a/media/audio/audio_output_controller_unittest.cc
+++ b/media/audio/audio_output_controller_unittest.cc
@@ -121,7 +121,7 @@ class AudioOutputControllerTest : public testing::Test {
controller_ = AudioOutputController::Create(
audio_manager_.get(), &mock_event_handler_, params_, std::string(),
- &mock_sync_reader_);
+ std::string(), &mock_sync_reader_);
if (controller_.get())
controller_->SetVolume(kTestVolume);
diff --git a/media/audio/audio_output_dispatcher.cc b/media/audio/audio_output_dispatcher.cc
index 7f3dd10..1e78c9d 100644
--- a/media/audio/audio_output_dispatcher.cc
+++ b/media/audio/audio_output_dispatcher.cc
@@ -11,11 +11,13 @@ namespace media {
AudioOutputDispatcher::AudioOutputDispatcher(
AudioManager* audio_manager,
const AudioParameters& params,
- const std::string& device_id)
+ const std::string& output_device_id,
+ const std::string& input_device_id)
: audio_manager_(audio_manager),
task_runner_(audio_manager->GetTaskRunner()),
params_(params),
- device_id_(device_id) {
+ output_device_id_(output_device_id),
+ input_device_id_(input_device_id) {
// We expect to be instantiated on the audio thread. Otherwise the
// |task_runner_| member will point to the wrong message loop!
DCHECK(audio_manager->GetTaskRunner()->BelongsToCurrentThread());
diff --git a/media/audio/audio_output_dispatcher.h b/media/audio/audio_output_dispatcher.h
index d070b6b..69c5f16 100644
--- a/media/audio/audio_output_dispatcher.h
+++ b/media/audio/audio_output_dispatcher.h
@@ -38,7 +38,8 @@ class MEDIA_EXPORT AudioOutputDispatcher
public:
AudioOutputDispatcher(AudioManager* audio_manager,
const AudioParameters& params,
- const std::string& device_id);
+ const std::string& output_device_id,
+ const std::string& input_device_id);
// Called by AudioOutputProxy to open the stream.
// Returns false, if it fails to open it.
@@ -72,7 +73,8 @@ class MEDIA_EXPORT AudioOutputDispatcher
virtual void CloseStreamsForWedgeFix() = 0;
virtual void RestartStreamsForWedgeFix() = 0;
- const std::string& device_id() const { return device_id_; }
+ // Accessor to the input device id used by unified IO.
+ const std::string& input_device_id() const { return input_device_id_; }
protected:
friend class base::RefCountedThreadSafe<AudioOutputDispatcher>;
@@ -83,7 +85,8 @@ class MEDIA_EXPORT AudioOutputDispatcher
AudioManager* audio_manager_;
const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
const AudioParameters params_;
- std::string device_id_;
+ std::string output_device_id_;
+ const std::string input_device_id_;
private:
DISALLOW_COPY_AND_ASSIGN(AudioOutputDispatcher);
diff --git a/media/audio/audio_output_dispatcher_impl.cc b/media/audio/audio_output_dispatcher_impl.cc
index 5db170e..d2e1d1c 100644
--- a/media/audio/audio_output_dispatcher_impl.cc
+++ b/media/audio/audio_output_dispatcher_impl.cc
@@ -19,10 +19,12 @@ AudioOutputDispatcherImpl::AudioOutputDispatcherImpl(
AudioManager* audio_manager,
const AudioParameters& params,
const std::string& output_device_id,
+ const std::string& input_device_id,
const base::TimeDelta& close_delay)
: AudioOutputDispatcher(audio_manager,
params,
- output_device_id),
+ output_device_id,
+ input_device_id),
idle_proxies_(0),
close_timer_(FROM_HERE,
close_delay,
@@ -129,7 +131,7 @@ void AudioOutputDispatcherImpl::Shutdown() {
bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
DCHECK(task_runner_->BelongsToCurrentThread());
AudioOutputStream* stream = audio_manager_->MakeAudioOutputStream(
- params_, device_id_);
+ params_, output_device_id_, input_device_id_);
if (!stream)
return false;
@@ -141,7 +143,7 @@ bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
const int stream_id = audio_stream_id_++;
audio_stream_ids_[stream] = stream_id;
audio_log_->OnCreated(
- stream_id, params_, device_id_);
+ stream_id, params_, input_device_id_, output_device_id_);
idle_streams_.push_back(stream);
return true;
diff --git a/media/audio/audio_output_dispatcher_impl.h b/media/audio/audio_output_dispatcher_impl.h
index cb1ddb9..037e114 100644
--- a/media/audio/audio_output_dispatcher_impl.h
+++ b/media/audio/audio_output_dispatcher_impl.h
@@ -36,6 +36,7 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
AudioOutputDispatcherImpl(AudioManager* audio_manager,
const AudioParameters& params,
const std::string& output_device_id,
+ const std::string& input_device_id,
const base::TimeDelta& close_delay);
// Opens a new physical stream if there are no pending streams in
diff --git a/media/audio/audio_output_proxy_unittest.cc b/media/audio/audio_output_proxy_unittest.cc
index 534a6d9..1c37dc5 100644
--- a/media/audio/audio_output_proxy_unittest.cc
+++ b/media/audio/audio_output_proxy_unittest.cc
@@ -93,12 +93,14 @@ class MockAudioManager : public AudioManagerBase {
MOCK_METHOD0(HasAudioOutputDevices, bool());
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD0(GetAudioInputDeviceModel, base::string16());
- MOCK_METHOD2(MakeAudioOutputStream, AudioOutputStream*(
+ MOCK_METHOD3(MakeAudioOutputStream, AudioOutputStream*(
const AudioParameters& params,
- const std::string& device_id));
- MOCK_METHOD2(MakeAudioOutputStreamProxy, AudioOutputStream*(
+ const std::string& device_id,
+ const std::string& input_device_id));
+ MOCK_METHOD3(MakeAudioOutputStreamProxy, AudioOutputStream*(
const AudioParameters& params,
- const std::string& device_id));
+ const std::string& device_id,
+ const std::string& input_device_id));
MOCK_METHOD2(MakeAudioInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD0(ShowAudioInputSettings, void());
@@ -110,8 +112,9 @@ class MockAudioManager : public AudioManagerBase {
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params, const std::string& device_id));
+ MOCK_METHOD3(MakeLowLatencyOutputStream, AudioOutputStream*(
+ const AudioParameters& params, const std::string& device_id,
+ const std::string& input_device_id));
MOCK_METHOD2(MakeLinearInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
@@ -165,6 +168,7 @@ class AudioOutputProxyTest : public testing::Test {
dispatcher_impl_ = new AudioOutputDispatcherImpl(&manager(),
params_,
std::string(),
+ std::string(),
close_delay);
}
@@ -195,7 +199,7 @@ class AudioOutputProxyTest : public testing::Test {
void OpenAndClose(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -209,7 +213,7 @@ class AudioOutputProxyTest : public testing::Test {
void StartAndStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -232,7 +236,7 @@ class AudioOutputProxyTest : public testing::Test {
void CloseAfterStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -257,7 +261,7 @@ class AudioOutputProxyTest : public testing::Test {
void TwoStreams(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -276,7 +280,7 @@ class AudioOutputProxyTest : public testing::Test {
void OpenFailed(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(false));
@@ -293,7 +297,7 @@ class AudioOutputProxyTest : public testing::Test {
void CreateAndWait(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -310,7 +314,7 @@ class AudioOutputProxyTest : public testing::Test {
void OneStream_TwoPlays(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
@@ -342,7 +346,7 @@ class AudioOutputProxyTest : public testing::Test {
MockAudioOutputStream stream1(&manager_, params_);
MockAudioOutputStream stream2(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2));
@@ -379,7 +383,7 @@ class AudioOutputProxyTest : public testing::Test {
void StartFailed(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -390,7 +394,7 @@ class AudioOutputProxyTest : public testing::Test {
WaitForCloseTimer(&stream);
// |stream| is closed at this point. Start() should reopen it again.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(2)
.WillRepeatedly(Return(reinterpret_cast<AudioOutputStream*>(NULL)));
@@ -430,7 +434,8 @@ class AudioOutputResamplerTest : public AudioOutputProxyTest {
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
16000, 16, 1024);
resampler_ = new AudioOutputResampler(
- &manager(), params_, resampler_params_, std::string(), close_delay);
+ &manager(), params_, resampler_params_, std::string(), std::string(),
+ close_delay);
}
virtual void OnStart() OVERRIDE {
@@ -530,7 +535,7 @@ TEST_F(AudioOutputResamplerTest, StartFailed) { StartFailed(resampler_); }
// ensure AudioOutputResampler falls back to the high latency path.
TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(2)
.WillOnce(Return(static_cast<AudioOutputStream*>(NULL)))
.WillRepeatedly(Return(&stream));
@@ -547,7 +552,7 @@ TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
TEST_F(AudioOutputResamplerTest, LowLatencyOpenFailedFallback) {
MockAudioOutputStream failed_stream(&manager_, params_);
MockAudioOutputStream okay_stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(2)
.WillOnce(Return(&failed_stream))
.WillRepeatedly(Return(&okay_stream));
@@ -575,7 +580,7 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
#else
static const int kFallbackCount = 1;
#endif
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -586,7 +591,7 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
testing::Property(&AudioParameters::sample_rate, params_.sample_rate()),
testing::Property(
&AudioParameters::frames_per_buffer, params_.frames_per_buffer())),
- _))
+ _, _))
.Times(1)
.WillOnce(Return(&okay_stream));
EXPECT_CALL(okay_stream, Open())
@@ -608,7 +613,7 @@ TEST_F(AudioOutputResamplerTest, AllFallbackFailed) {
#else
static const int kFallbackCount = 2;
#endif
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -624,7 +629,7 @@ TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
MockAudioOutputStream stream2(&manager_, params_);
// Setup the mock such that all three streams are successfully created.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2))
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -680,7 +685,7 @@ TEST_F(AudioOutputResamplerTest, WedgeFix) {
MockAudioOutputStream stream3(&manager_, params_);
// Setup the mock such that all three streams are successfully created.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2))
.WillOnce(Return(&stream3));
diff --git a/media/audio/audio_output_resampler.cc b/media/audio/audio_output_resampler.cc
index a5514c5..e6fcc56 100644
--- a/media/audio/audio_output_resampler.cc
+++ b/media/audio/audio_output_resampler.cc
@@ -135,7 +135,7 @@ void AudioOutputResampler::SetupFallbackParams() {
AudioParameters::AUDIO_PCM_LINEAR, params_.channel_layout(),
params_.sample_rate(), params_.bits_per_sample(),
frames_per_buffer);
- device_id_ = "";
+ output_device_id_ = "";
Initialize();
#endif
}
@@ -144,8 +144,10 @@ AudioOutputResampler::AudioOutputResampler(AudioManager* audio_manager,
const AudioParameters& input_params,
const AudioParameters& output_params,
const std::string& output_device_id,
+ const std::string& input_device_id,
const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, input_params, output_device_id),
+ : AudioOutputDispatcher(audio_manager, input_params, output_device_id,
+ input_device_id),
close_delay_(close_delay),
output_params_(output_params),
streams_opened_(false) {
@@ -167,7 +169,8 @@ void AudioOutputResampler::Initialize() {
DCHECK(!streams_opened_);
DCHECK(callbacks_.empty());
dispatcher_ = new AudioOutputDispatcherImpl(
- audio_manager_, output_params_, device_id_, close_delay_);
+ audio_manager_, output_params_, output_device_id_, input_device_id_,
+ close_delay_);
}
bool AudioOutputResampler::OpenStream() {
diff --git a/media/audio/audio_output_resampler.h b/media/audio/audio_output_resampler.h
index 80c9d77..a8fca23 100644
--- a/media/audio/audio_output_resampler.h
+++ b/media/audio/audio_output_resampler.h
@@ -41,6 +41,7 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
const AudioParameters& input_params,
const AudioParameters& output_params,
const std::string& output_device_id,
+ const std::string& input_device_id,
const base::TimeDelta& close_delay);
// AudioOutputDispatcher interface.
diff --git a/media/audio/cras/cras_unified_unittest.cc b/media/audio/cras/cras_unified_unittest.cc
index 9d282bb..ee36b10 100644
--- a/media/audio/cras/cras_unified_unittest.cc
+++ b/media/audio/cras/cras_unified_unittest.cc
@@ -37,9 +37,10 @@ class MockAudioManagerCras : public AudioManagerCras {
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD2(MakeLowLatencyOutputStream,
+ MOCK_METHOD3(MakeLowLatencyOutputStream,
AudioOutputStream*(const AudioParameters& params,
- const std::string& device_id));
+ const std::string& device_id,
+ const std::string& input_device_id));
MOCK_METHOD2(MakeLinearOutputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
diff --git a/media/audio/fake_audio_log_factory.cc b/media/audio/fake_audio_log_factory.cc
index 5e2d134..6f752e5 100644
--- a/media/audio/fake_audio_log_factory.cc
+++ b/media/audio/fake_audio_log_factory.cc
@@ -12,7 +12,8 @@ class FakeAudioLogImpl : public AudioLog {
virtual ~FakeAudioLogImpl() {}
virtual void OnCreated(int component_id,
const media::AudioParameters& params,
- const std::string& device_id) OVERRIDE {}
+ const std::string& input_device_id,
+ const std::string& output_device_id) OVERRIDE {}
virtual void OnStarted(int component_id) OVERRIDE {}
virtual void OnStopped(int component_id) OVERRIDE {}
virtual void OnClosed(int component_id) OVERRIDE {}
diff --git a/media/audio/fake_audio_manager.cc b/media/audio/fake_audio_manager.cc
index e5d9bd4..bfe9a0a 100644
--- a/media/audio/fake_audio_manager.cc
+++ b/media/audio/fake_audio_manager.cc
@@ -33,7 +33,8 @@ AudioOutputStream* FakeAudioManager::MakeLinearOutputStream(
AudioOutputStream* FakeAudioManager::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
return FakeAudioOutputStream::MakeFakeStream(this, params);
}
diff --git a/media/audio/fake_audio_manager.h b/media/audio/fake_audio_manager.h
index 9fbf140..b5c4520 100644
--- a/media/audio/fake_audio_manager.h
+++ b/media/audio/fake_audio_manager.h
@@ -26,7 +26,8 @@ class MEDIA_EXPORT FakeAudioManager : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(const AudioParameters& params,
const std::string& device_id)
OVERRIDE;
diff --git a/media/audio/mac/aggregate_device_manager.cc b/media/audio/mac/aggregate_device_manager.cc
new file mode 100644
index 0000000..c7f3233
--- /dev/null
+++ b/media/audio/mac/aggregate_device_manager.cc
@@ -0,0 +1,371 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/aggregate_device_manager.h"
+
+#include <CoreAudio/AudioHardware.h>
+#include <string>
+
+#include "base/mac/mac_logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/mac/audio_manager_mac.h"
+
+using base::ScopedCFTypeRef;
+
+namespace media {
+
+AggregateDeviceManager::AggregateDeviceManager()
+ : plugin_id_(kAudioObjectUnknown),
+ input_device_(kAudioDeviceUnknown),
+ output_device_(kAudioDeviceUnknown),
+ aggregate_device_(kAudioObjectUnknown) {
+}
+
+AggregateDeviceManager::~AggregateDeviceManager() {
+ DestroyAggregateDevice();
+}
+
+AudioDeviceID AggregateDeviceManager::GetDefaultAggregateDevice() {
+ AudioDeviceID current_input_device;
+ AudioDeviceID current_output_device;
+ AudioManagerMac::GetDefaultInputDevice(&current_input_device);
+ AudioManagerMac::GetDefaultOutputDevice(&current_output_device);
+
+ if (AudioManagerMac::HardwareSampleRateForDevice(current_input_device) !=
+ AudioManagerMac::HardwareSampleRateForDevice(current_output_device)) {
+ // TODO(crogers): with some extra work we can make aggregate devices work
+ // if the clock domain is the same but the sample rates differ.
+ // For now we fall back to the synchronized path.
+ return kAudioDeviceUnknown;
+ }
+
+ // Use a lazily created aggregate device if it's already available
+ // and still appropriate.
+ if (aggregate_device_ != kAudioObjectUnknown) {
+ // TODO(crogers): handle default device changes for synchronized I/O.
+ // For now, we check to make sure the default devices haven't changed
+ // since we lazily created the aggregate device.
+ if (current_input_device == input_device_ &&
+ current_output_device == output_device_)
+ return aggregate_device_;
+
+ // For now, once lazily created don't attempt to create another
+ // aggregate device.
+ return kAudioDeviceUnknown;
+ }
+
+ input_device_ = current_input_device;
+ output_device_ = current_output_device;
+
+ // Only create an aggregate device if the clock domains match.
+ UInt32 input_clockdomain = GetClockDomain(input_device_);
+ UInt32 output_clockdomain = GetClockDomain(output_device_);
+ DVLOG(1) << "input_clockdomain: " << input_clockdomain;
+ DVLOG(1) << "output_clockdomain: " << output_clockdomain;
+
+ if (input_clockdomain == 0 || input_clockdomain != output_clockdomain)
+ return kAudioDeviceUnknown;
+
+ OSStatus result = CreateAggregateDevice(
+ input_device_,
+ output_device_,
+ &aggregate_device_);
+ if (result != noErr)
+ DestroyAggregateDevice();
+
+ return aggregate_device_;
+}
+
+CFStringRef AggregateDeviceManager::GetDeviceUID(AudioDeviceID id) {
+ static const AudioObjectPropertyAddress kDeviceUIDAddress = {
+ kAudioDevicePropertyDeviceUID,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ // As stated in the CoreAudio header (AudioHardwareBase.h),
+ // the caller is responsible for releasing the device_UID.
+ CFStringRef device_UID;
+ UInt32 size = sizeof(device_UID);
+ OSStatus result = AudioObjectGetPropertyData(
+ id,
+ &kDeviceUIDAddress,
+ 0,
+ 0,
+ &size,
+ &device_UID);
+
+ return (result == noErr) ? device_UID : NULL;
+}
+
+void AggregateDeviceManager::GetDeviceName(
+ AudioDeviceID id, char* name, UInt32 size) {
+ static const AudioObjectPropertyAddress kDeviceNameAddress = {
+ kAudioDevicePropertyDeviceName,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ OSStatus result = AudioObjectGetPropertyData(
+ id,
+ &kDeviceNameAddress,
+ 0,
+ 0,
+ &size,
+ name);
+
+ if (result != noErr && size > 0)
+ name[0] = 0;
+}
+
+UInt32 AggregateDeviceManager::GetClockDomain(AudioDeviceID device_id) {
+ static const AudioObjectPropertyAddress kClockDomainAddress = {
+ kAudioDevicePropertyClockDomain,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 clockdomain = 0;
+ UInt32 size = sizeof(UInt32);
+ OSStatus result = AudioObjectGetPropertyData(
+ device_id,
+ &kClockDomainAddress,
+ 0,
+ 0,
+ &size,
+ &clockdomain);
+
+ return (result == noErr) ? clockdomain : 0;
+}
+
+OSStatus AggregateDeviceManager::GetPluginID(AudioObjectID* id) {
+ DCHECK(id);
+
+ // Get the audio hardware plugin.
+ CFStringRef bundle_name = CFSTR("com.apple.audio.CoreAudio");
+
+ AudioValueTranslation plugin_translation;
+ plugin_translation.mInputData = &bundle_name;
+ plugin_translation.mInputDataSize = sizeof(bundle_name);
+ plugin_translation.mOutputData = id;
+ plugin_translation.mOutputDataSize = sizeof(*id);
+
+ static const AudioObjectPropertyAddress kPlugInAddress = {
+ kAudioHardwarePropertyPlugInForBundleID,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 size = sizeof(plugin_translation);
+ OSStatus result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &kPlugInAddress,
+ 0,
+ 0,
+ &size,
+ &plugin_translation);
+
+ DVLOG(1) << "CoreAudio plugin ID: " << *id;
+
+ return result;
+}
+
+CFMutableDictionaryRef
+AggregateDeviceManager::CreateAggregateDeviceDictionary(
+ AudioDeviceID input_id,
+ AudioDeviceID output_id) {
+ CFMutableDictionaryRef aggregate_device_dict = CFDictionaryCreateMutable(
+ NULL,
+ 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+ if (!aggregate_device_dict)
+ return NULL;
+
+ const CFStringRef kAggregateDeviceName =
+ CFSTR("ChromeAggregateAudioDevice");
+ const CFStringRef kAggregateDeviceUID =
+ CFSTR("com.google.chrome.AggregateAudioDevice");
+
+ // Add name and UID of the device to the dictionary.
+ CFDictionaryAddValue(
+ aggregate_device_dict,
+ CFSTR(kAudioAggregateDeviceNameKey),
+ kAggregateDeviceName);
+ CFDictionaryAddValue(
+ aggregate_device_dict,
+ CFSTR(kAudioAggregateDeviceUIDKey),
+ kAggregateDeviceUID);
+
+ // Add a "private aggregate key" to the dictionary.
+ // The 1 value means that the created aggregate device will
+ // only be accessible from the process that created it, and
+ // won't be visible to outside processes.
+ int value = 1;
+ ScopedCFTypeRef<CFNumberRef> aggregate_device_number(CFNumberCreate(
+ NULL,
+ kCFNumberIntType,
+ &value));
+ CFDictionaryAddValue(
+ aggregate_device_dict,
+ CFSTR(kAudioAggregateDeviceIsPrivateKey),
+ aggregate_device_number);
+
+ return aggregate_device_dict;
+}
+
+CFMutableArrayRef
+AggregateDeviceManager::CreateSubDeviceArray(
+ CFStringRef input_device_UID, CFStringRef output_device_UID) {
+ CFMutableArrayRef sub_devices_array = CFArrayCreateMutable(
+ NULL,
+ 0,
+ &kCFTypeArrayCallBacks);
+
+ CFArrayAppendValue(sub_devices_array, input_device_UID);
+ CFArrayAppendValue(sub_devices_array, output_device_UID);
+
+ return sub_devices_array;
+}
+
+OSStatus AggregateDeviceManager::CreateAggregateDevice(
+ AudioDeviceID input_id,
+ AudioDeviceID output_id,
+ AudioDeviceID* aggregate_device) {
+ DCHECK(aggregate_device);
+
+ const size_t kMaxDeviceNameLength = 256;
+
+ scoped_ptr<char[]> input_device_name(new char[kMaxDeviceNameLength]);
+ GetDeviceName(
+ input_id,
+ input_device_name.get(),
+ sizeof(input_device_name));
+ DVLOG(1) << "Input device: \n" << input_device_name;
+
+ scoped_ptr<char[]> output_device_name(new char[kMaxDeviceNameLength]);
+ GetDeviceName(
+ output_id,
+ output_device_name.get(),
+ sizeof(output_device_name));
+ DVLOG(1) << "Output device: \n" << output_device_name;
+
+ OSStatus result = GetPluginID(&plugin_id_);
+ if (result != noErr)
+ return result;
+
+ // Create a dictionary for the aggregate device.
+ ScopedCFTypeRef<CFMutableDictionaryRef> aggregate_device_dict(
+ CreateAggregateDeviceDictionary(input_id, output_id));
+ if (!aggregate_device_dict)
+ return -1;
+
+ // Create the aggregate device.
+ static const AudioObjectPropertyAddress kCreateAggregateDeviceAddress = {
+ kAudioPlugInCreateAggregateDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 size = sizeof(*aggregate_device);
+ result = AudioObjectGetPropertyData(
+ plugin_id_,
+ &kCreateAggregateDeviceAddress,
+ sizeof(aggregate_device_dict),
+ &aggregate_device_dict,
+ &size,
+ aggregate_device);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error creating aggregate audio device!";
+ return result;
+ }
+
+ // Set the sub-devices for the aggregate device.
+ // In this case we use two: the input and output devices.
+
+ ScopedCFTypeRef<CFStringRef> input_device_UID(GetDeviceUID(input_id));
+ ScopedCFTypeRef<CFStringRef> output_device_UID(GetDeviceUID(output_id));
+ if (!input_device_UID || !output_device_UID) {
+ DLOG(ERROR) << "Error getting audio device UID strings.";
+ return -1;
+ }
+
+ ScopedCFTypeRef<CFMutableArrayRef> sub_devices_array(
+ CreateSubDeviceArray(input_device_UID, output_device_UID));
+ if (sub_devices_array == NULL) {
+ DLOG(ERROR) << "Error creating sub-devices array.";
+ return -1;
+ }
+
+ static const AudioObjectPropertyAddress kSetSubDevicesAddress = {
+ kAudioAggregateDevicePropertyFullSubDeviceList,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ size = sizeof(CFMutableArrayRef);
+ result = AudioObjectSetPropertyData(
+ *aggregate_device,
+ &kSetSubDevicesAddress,
+ 0,
+ NULL,
+ size,
+ &sub_devices_array);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error setting aggregate audio device sub-devices!";
+ return result;
+ }
+
+ // Use the input device as the master device.
+ static const AudioObjectPropertyAddress kSetMasterDeviceAddress = {
+ kAudioAggregateDevicePropertyMasterSubDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ size = sizeof(CFStringRef);
+ result = AudioObjectSetPropertyData(
+ *aggregate_device,
+ &kSetMasterDeviceAddress,
+ 0,
+ NULL,
+ size,
+ &input_device_UID);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error setting aggregate audio device master device!";
+ return result;
+ }
+
+ DVLOG(1) << "New aggregate device: " << *aggregate_device;
+ return noErr;
+}
+
+void AggregateDeviceManager::DestroyAggregateDevice() {
+ if (aggregate_device_ == kAudioObjectUnknown)
+ return;
+
+ static const AudioObjectPropertyAddress kDestroyAddress = {
+ kAudioPlugInDestroyAggregateDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 size = sizeof(aggregate_device_);
+ OSStatus result = AudioObjectGetPropertyData(
+ plugin_id_,
+ &kDestroyAddress,
+ 0,
+ NULL,
+ &size,
+ &aggregate_device_);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error destroying aggregate audio device!";
+ return;
+ }
+
+ aggregate_device_ = kAudioObjectUnknown;
+}
+
+} // namespace media
diff --git a/media/audio/mac/aggregate_device_manager.h b/media/audio/mac/aggregate_device_manager.h
new file mode 100644
index 0000000..7b8b71f
--- /dev/null
+++ b/media/audio/mac/aggregate_device_manager.h
@@ -0,0 +1,58 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
+#define MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
+
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MEDIA_EXPORT AggregateDeviceManager {
+ public:
+ AggregateDeviceManager();
+ ~AggregateDeviceManager();
+
+ // Lazily creates an aggregate device based on the default
+ // input and output devices.
+ // It will either return a valid device or kAudioDeviceUnknown
+ // if the default devices are not suitable for aggregate devices.
+ AudioDeviceID GetDefaultAggregateDevice();
+
+ private:
+ // The caller is responsible for releasing the CFStringRef.
+ static CFStringRef GetDeviceUID(AudioDeviceID id);
+
+ static void GetDeviceName(AudioDeviceID id, char* name, UInt32 size);
+ static UInt32 GetClockDomain(AudioDeviceID device_id);
+ static OSStatus GetPluginID(AudioObjectID* id);
+
+ CFMutableDictionaryRef CreateAggregateDeviceDictionary(
+ AudioDeviceID input_id,
+ AudioDeviceID output_id);
+
+ CFMutableArrayRef CreateSubDeviceArray(CFStringRef input_device_UID,
+ CFStringRef output_device_UID);
+
+ OSStatus CreateAggregateDevice(AudioDeviceID input_id,
+ AudioDeviceID output_id,
+ AudioDeviceID* aggregate_device);
+ void DestroyAggregateDevice();
+
+ AudioObjectID plugin_id_;
+ AudioDeviceID input_device_;
+ AudioDeviceID output_device_;
+
+ AudioDeviceID aggregate_device_;
+
+ DISALLOW_COPY_AND_ASSIGN(AggregateDeviceManager);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
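Typical use of the re-added manager, sketched under the assumption of a running CoreAudio HAL; kAudioDeviceUnknown is the documented "not suitable" result:

    #include "media/audio/mac/aggregate_device_manager.h"

    media::AggregateDeviceManager aggregate_device_manager;
    AudioDeviceID device =
        aggregate_device_manager.GetDefaultAggregateDevice();
    if (device != kAudioDeviceUnknown) {
      // The default input and output devices share a clock domain, so one
      // AUHAL unit can drive synchronized I/O on the private aggregate.
    } else {
      // Mismatched clock domains or sample rates; callers fall back to
      // AudioSynchronizedStream (see audio_manager_mac.cc below).
    }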
diff --git a/media/audio/mac/audio_auhal_mac_unittest.cc b/media/audio/mac/audio_auhal_mac_unittest.cc
index fd0ffff..9babd80 100644
--- a/media/audio/mac/audio_auhal_mac_unittest.cc
+++ b/media/audio/mac/audio_auhal_mac_unittest.cc
@@ -45,7 +45,7 @@ class AUHALStreamTest : public testing::Test {
AudioOutputStream* Create() {
return manager_->MakeAudioOutputStream(
- manager_->GetDefaultOutputStreamParameters(), "");
+ manager_->GetDefaultOutputStreamParameters(), "", "");
}
bool CanRunAudioTests() {
diff --git a/media/audio/mac/audio_manager_mac.cc b/media/audio/mac/audio_manager_mac.cc
index e86e7b48..c08efff 100644
--- a/media/audio/mac/audio_manager_mac.cc
+++ b/media/audio/mac/audio_manager_mac.cc
@@ -20,6 +20,8 @@
#include "media/audio/mac/audio_input_mac.h"
#include "media/audio/mac/audio_low_latency_input_mac.h"
#include "media/audio/mac/audio_low_latency_output_mac.h"
+#include "media/audio/mac/audio_synchronized_mac.h"
+#include "media/audio/mac/audio_unified_mac.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/channel_layout.h"
#include "media/base/limits.h"
@@ -54,6 +56,16 @@ static bool HasAudioHardware(AudioObjectPropertySelector selector) {
output_device_id != kAudioObjectUnknown;
}
+// Returns true if the default input device is the same as
+// the default output device.
+bool AudioManagerMac::HasUnifiedDefaultIO() {
+ AudioDeviceID input_id, output_id;
+ if (!GetDefaultInputDevice(&input_id) || !GetDefaultOutputDevice(&output_id))
+ return false;
+
+ return input_id == output_id;
+}
+
// Retrieves information on audio devices, and prepends the default
// device to the list if the list is non-empty.
static void GetAudioDeviceInfo(bool is_input,
@@ -554,18 +566,72 @@ std::string AudioManagerMac::GetAssociatedOutputDeviceID(
AudioOutputStream* AudioManagerMac::MakeLinearOutputStream(
const AudioParameters& params) {
- return MakeLowLatencyOutputStream(params, std::string());
+ return MakeLowLatencyOutputStream(params, std::string(), std::string());
}
AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) {
- AudioDeviceID device = GetAudioDeviceIdByUId(false, device_id);
- if (device == kAudioObjectUnknown) {
- DLOG(ERROR) << "Failed to open output device: " << device_id;
+ const std::string& device_id,
+ const std::string& input_device_id) {
+ // Handle basic output with no input channels.
+ if (params.input_channels() == 0) {
+ AudioDeviceID device = GetAudioDeviceIdByUId(false, device_id);
+ if (device == kAudioObjectUnknown) {
+ DLOG(ERROR) << "Failed to open output device: " << device_id;
+ return NULL;
+ }
+ return new AUHALStream(this, params, device);
+ }
+
+ DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
+
+ // TODO(xians): support more than stereo input.
+ if (params.input_channels() != 2) {
+ // WebAudio is currently hard-coded to 2 channels so we should not
+ // see this case.
+ NOTREACHED() << "Only stereo input is currently supported!";
return NULL;
}
- return new AUHALStream(this, params, device);
+
+ AudioDeviceID device = kAudioObjectUnknown;
+ if (HasUnifiedDefaultIO()) {
+ // For I/O, the simplest case is when the default input and output
+ // devices are the same.
+ GetDefaultOutputDevice(&device);
+ VLOG(0) << "UNIFIED: default input and output devices are identical";
+ } else {
+ // Some audio hardware is presented as separate input and output devices
+ // even though they are really the same physical hardware and
+ // share the same "clock domain" at the lowest levels of the driver.
+ // A common of example of this is the "built-in" audio hardware:
+ // "Built-in Line Input"
+ // "Built-in Output"
+    // We would like to use an "aggregate" device for these situations, since
+    // CoreAudio makes the most efficient use of the shared "clock domain",
+    // giving us the lowest latency and requiring fewer threads.
+ device = aggregate_device_manager_.GetDefaultAggregateDevice();
+ if (device != kAudioObjectUnknown)
+ VLOG(0) << "Using AGGREGATE audio device";
+ }
+
+ if (device != kAudioObjectUnknown &&
+ input_device_id == AudioManagerBase::kDefaultDeviceId)
+ return new AUHALStream(this, params, device);
+
+  // Fall back to AudioSynchronizedStream, which handles arbitrary
+  // combinations of input and output devices, even ones running at
+  // different sample-rates.
+ // kAudioDeviceUnknown translates to "use default" here.
+ // TODO(xians): consider tracking UMA stats on AUHALStream
+ // versus AudioSynchronizedStream.
+ AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, input_device_id);
+ if (audio_device_id == kAudioObjectUnknown)
+ return NULL;
+
+ return new AudioSynchronizedStream(this,
+ params,
+ audio_device_id,
+ kAudioDeviceUnknown);
}
std::string AudioManagerMac::GetDefaultOutputDeviceID() {
diff --git a/media/audio/mac/audio_manager_mac.h b/media/audio/mac/audio_manager_mac.h
index 0d4e05f..641f9d3 100644
--- a/media/audio/mac/audio_manager_mac.h
+++ b/media/audio/mac/audio_manager_mac.h
@@ -11,6 +11,7 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/mac/aggregate_device_manager.h"
#include "media/audio/mac/audio_device_listener_mac.h"
namespace media {
@@ -39,7 +40,8 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
@@ -78,6 +80,8 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
const AudioParameters& input_params) OVERRIDE;
private:
+ bool HasUnifiedDefaultIO();
+
// Helper methods for constructing AudioDeviceListenerMac on the audio thread.
void CreateDeviceListener();
void DestroyDeviceListener();
@@ -95,6 +99,8 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
int current_sample_rate_;
AudioDeviceID current_output_device_;
+ AggregateDeviceManager aggregate_device_manager_;
+
// Helper class which monitors power events to determine if output streams
// should defer Start() calls. Required to workaround an OSX bug. See
// http://crbug.com/160920 for more details.
diff --git a/media/audio/mac/audio_synchronized_mac.cc b/media/audio/mac/audio_synchronized_mac.cc
new file mode 100644
index 0000000..a9bc88e
--- /dev/null
+++ b/media/audio/mac/audio_synchronized_mac.cc
@@ -0,0 +1,976 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_synchronized_mac.h"
+
+#include <CoreServices/CoreServices.h>
+#include <algorithm>
+
+#include "base/basictypes.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/mac/audio_manager_mac.h"
+#include "media/base/channel_mixer.h"
+
+namespace media {
+
+static const int kHardwareBufferSize = 128;
+static const int kFifoSize = 16384;
+
+// TODO(crogers): handle the non-stereo case.
+static const int kChannels = 2;
+
+// This value was determined empirically for minimum latency while still
+// guarding against FIFO under-runs.
+static const int kBaseTargetFifoFrames = 256 + 64;
+
+// If the input and output sample-rate don't match, then we need to maintain
+// an additional safety margin due to the callback timing jitter and the
+// varispeed buffering. This value was empirically tuned.
+static const int kAdditionalTargetFifoFrames = 128;
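+
+// For example, assuming a 48 kHz device, the worst-case target of
+// (256 + 64) + 128 = 448 frames corresponds to roughly 9.3 ms of
+// buffered audio, which is the latency cost of this safety margin.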
+
+static void ZeroBufferList(AudioBufferList* buffer_list) {
+ for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i)
+ memset(buffer_list->mBuffers[i].mData,
+ 0,
+ buffer_list->mBuffers[i].mDataByteSize);
+}
+
+static void WrapBufferList(AudioBufferList* buffer_list,
+ AudioBus* bus,
+ int frames) {
+ DCHECK(buffer_list);
+ DCHECK(bus);
+ int channels = bus->channels();
+ int buffer_list_channels = buffer_list->mNumberBuffers;
+
+ // Copy pointers from AudioBufferList.
+ int source_idx = 0;
+ for (int i = 0; i < channels; ++i) {
+ bus->SetChannelData(
+ i, static_cast<float*>(buffer_list->mBuffers[source_idx].mData));
+
+ // It's ok to pass in a |buffer_list| with fewer channels, in which
+ // case we just duplicate the last channel.
+ if (source_idx < buffer_list_channels - 1)
+ ++source_idx;
+ }
+
+ // Finally set the actual length.
+ bus->set_frames(frames);
+}
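+
+// As an illustration: wrapping a mono AudioBufferList into a stereo |bus|
+// leaves |source_idx| at 0, so both bus channels alias mBuffers[0].mData.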
+
+AudioSynchronizedStream::AudioSynchronizedStream(
+ AudioManagerMac* manager,
+ const AudioParameters& params,
+ AudioDeviceID input_id,
+ AudioDeviceID output_id)
+ : manager_(manager),
+ params_(params),
+ input_sample_rate_(0),
+ output_sample_rate_(0),
+ input_id_(input_id),
+ output_id_(output_id),
+ input_buffer_list_(NULL),
+ fifo_(kChannels, kFifoSize),
+ target_fifo_frames_(kBaseTargetFifoFrames),
+ average_delta_(0.0),
+ fifo_rate_compensation_(1.0),
+ input_unit_(0),
+ varispeed_unit_(0),
+ output_unit_(0),
+ first_input_time_(-1),
+ is_running_(false),
+ hardware_buffer_size_(kHardwareBufferSize),
+ channels_(kChannels) {
+ VLOG(1) << "AudioSynchronizedStream::AudioSynchronizedStream()";
+}
+
+AudioSynchronizedStream::~AudioSynchronizedStream() {
+ DCHECK(!input_unit_);
+ DCHECK(!output_unit_);
+ DCHECK(!varispeed_unit_);
+}
+
+bool AudioSynchronizedStream::Open() {
+ if (params_.channels() != kChannels) {
+ LOG(ERROR) << "Only stereo output is currently supported.";
+ return false;
+ }
+
+ // Create the input, output, and varispeed AudioUnits.
+ OSStatus result = CreateAudioUnits();
+ if (result != noErr) {
+ LOG(ERROR) << "Cannot create AudioUnits.";
+ return false;
+ }
+
+ result = SetupInput(input_id_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error configuring input AudioUnit.";
+ return false;
+ }
+
+ result = SetupOutput(output_id_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error configuring output AudioUnit.";
+ return false;
+ }
+
+ result = SetupCallbacks();
+ if (result != noErr) {
+ LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
+ return false;
+ }
+
+ result = SetupStreamFormats();
+ if (result != noErr) {
+ LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
+ return false;
+ }
+
+ AllocateInputData();
+
+ // Final initialization of the AudioUnits.
+ result = AudioUnitInitialize(input_unit_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error initializing input AudioUnit.";
+ return false;
+ }
+
+ result = AudioUnitInitialize(output_unit_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error initializing output AudioUnit.";
+ return false;
+ }
+
+ result = AudioUnitInitialize(varispeed_unit_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error initializing varispeed AudioUnit.";
+ return false;
+ }
+
+ if (input_sample_rate_ != output_sample_rate_) {
+ // Add extra safety margin.
+ target_fifo_frames_ += kAdditionalTargetFifoFrames;
+ }
+
+ // Buffer initial silence corresponding to target I/O buffering.
+ fifo_.Clear();
+ scoped_ptr<AudioBus> silence =
+ AudioBus::Create(channels_, target_fifo_frames_);
+ silence->Zero();
+ fifo_.Push(silence.get());
+
+ return true;
+}
+
+void AudioSynchronizedStream::Close() {
+ DCHECK(!is_running_);
+
+ if (input_buffer_list_) {
+ free(input_buffer_list_);
+ input_buffer_list_ = 0;
+ input_bus_.reset(NULL);
+ wrapper_bus_.reset(NULL);
+ }
+
+ if (input_unit_) {
+ AudioUnitUninitialize(input_unit_);
+ CloseComponent(input_unit_);
+ }
+
+ if (output_unit_) {
+ AudioUnitUninitialize(output_unit_);
+ CloseComponent(output_unit_);
+ }
+
+ if (varispeed_unit_) {
+ AudioUnitUninitialize(varispeed_unit_);
+ CloseComponent(varispeed_unit_);
+ }
+
+ input_unit_ = NULL;
+ output_unit_ = NULL;
+ varispeed_unit_ = NULL;
+
+ // Inform the audio manager that we have been closed. This can cause our
+ // destruction.
+ manager_->ReleaseOutputStream(this);
+}
+
+void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
+ DCHECK(callback);
+ DCHECK(input_unit_);
+ DCHECK(output_unit_);
+ DCHECK(varispeed_unit_);
+
+ if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
+ return;
+
+ source_ = callback;
+
+ // Reset state variables each time we Start().
+ fifo_rate_compensation_ = 1.0;
+ average_delta_ = 0.0;
+
+ OSStatus result = noErr;
+
+ if (!is_running_) {
+ first_input_time_ = -1;
+
+ result = AudioOutputUnitStart(input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr) {
+ result = AudioOutputUnitStart(output_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ }
+ }
+
+ is_running_ = true;
+}
+
+void AudioSynchronizedStream::Stop() {
+ OSStatus result = noErr;
+ if (is_running_) {
+ result = AudioOutputUnitStop(input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr) {
+ result = AudioOutputUnitStop(output_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ }
+ }
+
+ if (result == noErr)
+ is_running_ = false;
+}
+
+bool AudioSynchronizedStream::IsRunning() {
+ return is_running_;
+}
+
+// TODO(crogers): implement - or remove from AudioOutputStream.
+void AudioSynchronizedStream::SetVolume(double volume) {}
+void AudioSynchronizedStream::GetVolume(double* volume) {}
+
+OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
+ AudioDeviceID output_id) {
+ OSStatus result = noErr;
+
+ // Get the default output device if device is unknown.
+ if (output_id == kAudioDeviceUnknown) {
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ pa.mScope = kAudioObjectPropertyScopeGlobal;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ UInt32 size = sizeof(output_id);
+
+ result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &output_id);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+ }
+
+ // Set the render frame size.
+ UInt32 frame_size = hardware_buffer_size_;
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioDevicePropertyScopeInput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectSetPropertyData(
+ output_id,
+ &pa,
+ 0,
+ 0,
+ sizeof(frame_size),
+ &frame_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ output_info_.Initialize(output_id, false);
+
+ // Set the Current Device to the Default Output Unit.
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &output_info_.id_,
+ sizeof(output_info_.id_));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
+ AudioDeviceID input_id) {
+ OSStatus result = noErr;
+
+ // Get the default input device if device is unknown.
+ if (input_id == kAudioDeviceUnknown) {
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ pa.mScope = kAudioObjectPropertyScopeGlobal;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ UInt32 size = sizeof(input_id);
+
+ result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &input_id);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+ }
+
+ // Set the render frame size.
+ UInt32 frame_size = hardware_buffer_size_;
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioDevicePropertyScopeInput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectSetPropertyData(
+ input_id,
+ &pa,
+ 0,
+ 0,
+ sizeof(frame_size),
+ &frame_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ input_info_.Initialize(input_id, true);
+
+ // Set the Current Device to the AUHAL.
+ // This should be done only after I/O has been enabled on the AUHAL.
+ result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &input_info_.id_,
+ sizeof(input_info_.id_));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::CreateAudioUnits() {
+ // Q: Why do we need a varispeed unit?
+ // A: If the input device and the output device are running at
+ // different sample rates and/or on different clocks, we will need
+ // to compensate to avoid a pitch change and
+ // to avoid buffer under and over runs.
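+  //
+  // For example, with a 44.1 kHz input device feeding a 48 kHz output
+  // device, the varispeed resamples 44.1 kHz -> 48 kHz while the output
+  // callback trims |fifo_rate_compensation_| around 1.0 to absorb any
+  // residual clock drift.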
+ ComponentDescription varispeed_desc;
+ varispeed_desc.componentType = kAudioUnitType_FormatConverter;
+ varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
+ varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ varispeed_desc.componentFlags = 0;
+ varispeed_desc.componentFlagsMask = 0;
+
+ Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
+ if (varispeed_comp == NULL)
+ return -1;
+
+ OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Open input AudioUnit.
+ ComponentDescription input_desc;
+ input_desc.componentType = kAudioUnitType_Output;
+ input_desc.componentSubType = kAudioUnitSubType_HALOutput;
+ input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ input_desc.componentFlags = 0;
+ input_desc.componentFlagsMask = 0;
+
+ Component input_comp = FindNextComponent(NULL, &input_desc);
+ if (input_comp == NULL)
+ return -1;
+
+ result = OpenAComponent(input_comp, &input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Open output AudioUnit.
+ ComponentDescription output_desc;
+ output_desc.componentType = kAudioUnitType_Output;
+ output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
+ output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ output_desc.componentFlags = 0;
+ output_desc.componentFlagsMask = 0;
+
+ Component output_comp = FindNextComponent(NULL, &output_desc);
+ if (output_comp == NULL)
+ return -1;
+
+ result = OpenAComponent(output_comp, &output_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ return noErr;
+}
+
+OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
+ // The AUHAL used for input needs to be initialized
+ // before anything is done to it.
+ OSStatus result = AudioUnitInitialize(input_unit_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // We must enable the Audio Unit (AUHAL) for input and disable output
+ // BEFORE setting the AUHAL's current device.
+ result = EnableIO();
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ result = SetInputDeviceAsCurrent(input_id);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::EnableIO() {
+ // Enable input on the AUHAL.
+ UInt32 enable_io = 1;
+ OSStatus result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input,
+ 1, // input element
+ &enable_io,
+ sizeof(enable_io));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Disable Output on the AUHAL.
+ enable_io = 0;
+ result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ 0, // output element
+ &enable_io,
+ sizeof(enable_io));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
+ OSStatus result = noErr;
+
+ result = SetOutputDeviceAsCurrent(output_id);
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Tell the output unit not to reset timestamps.
+ // Otherwise sample rate changes will cause sync loss.
+ UInt32 start_at_zero = 0;
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioOutputUnitProperty_StartTimestampsAtZero,
+ kAudioUnitScope_Global,
+ 0,
+ &start_at_zero,
+ sizeof(start_at_zero));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetupCallbacks() {
+ // Set the input callback.
+ AURenderCallbackStruct callback;
+ callback.inputProc = InputProc;
+ callback.inputProcRefCon = this;
+ OSStatus result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Global,
+ 0,
+ &callback,
+ sizeof(callback));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the output callback.
+ callback.inputProc = OutputProc;
+ callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input,
+ 0,
+ &callback,
+ sizeof(callback));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the varispeed callback.
+ callback.inputProc = VarispeedProc;
+ callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(
+ varispeed_unit_,
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input,
+ 0,
+ &callback,
+ sizeof(callback));
+
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::SetupStreamFormats() {
+ AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;
+
+  // Get the Stream Format (input device side).
+ UInt32 property_size = sizeof(asbd_dev1_in);
+ OSStatus result = AudioUnitGetProperty(
+ input_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ 1,
+ &asbd_dev1_in,
+ &property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Get the Stream Format (client side).
+ property_size = sizeof(asbd);
+ result = AudioUnitGetProperty(
+ input_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 1,
+ &asbd,
+ &property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+  // Get the Stream Format (output device side).
+ property_size = sizeof(asbd_dev2_out);
+ result = AudioUnitGetProperty(
+ output_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 0,
+ &asbd_dev2_out,
+ &property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+  // Set the format of all the AUs to the input/output devices' channel count.
+  // In the simple case this is the lower of the input device's and the
+  // output device's channel counts.
+ asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
+ asbd_dev2_out.mChannelsPerFrame);
+
+ // We must get the sample rate of the input device and set it to the
+ // stream format of AUHAL.
+ Float64 rate = 0;
+ property_size = sizeof(rate);
+
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectGetPropertyData(
+ input_info_.id_,
+ &pa,
+ 0,
+ 0,
+ &property_size,
+ &rate);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ input_sample_rate_ = rate;
+
+ asbd.mSampleRate = rate;
+ property_size = sizeof(asbd);
+
+ // Set the new formats to the AUs...
+ result = AudioUnitSetProperty(
+ input_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 1,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ result = AudioUnitSetProperty(
+ varispeed_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ 0,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Set the correct sample rate for the output device,
+ // but keep the channel count the same.
+ property_size = sizeof(rate);
+
+ pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectGetPropertyData(
+ output_info_.id_,
+ &pa,
+ 0,
+ 0,
+ &property_size,
+ &rate);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ output_sample_rate_ = rate;
+
+ // The requested sample-rate must match the hardware sample-rate.
+ if (output_sample_rate_ != params_.sample_rate()) {
+ LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
+ << " must match the hardware sample-rate: " << output_sample_rate_;
+ return kAudioDeviceUnsupportedFormatError;
+ }
+
+ asbd.mSampleRate = rate;
+ property_size = sizeof(asbd);
+
+ // Set the new audio stream formats for the rest of the AUs...
+ result = AudioUnitSetProperty(
+ varispeed_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 0,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ 0,
+ &asbd,
+ property_size);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ return result;
+}
+
+void AudioSynchronizedStream::AllocateInputData() {
+ // Get the native number of input channels that the hardware supports.
+ int hardware_channels = 0;
+ bool got_hardware_channels = AudioManagerMac::GetDeviceChannels(
+ input_id_, kAudioDevicePropertyScopeInput, &hardware_channels);
+ if (!got_hardware_channels || hardware_channels > 2) {
+    // Only mono and stereo are supported on the input side. If we fail to
+    // query the native channel count, or it is greater than 2, we open the
+    // device in stereo mode.
+ hardware_channels = 2;
+ }
+
+ // Allocate storage for the AudioBufferList used for the
+ // input data from the input AudioUnit.
+  // We allocate enough space for one AudioBuffer per channel.
+ size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
+ (sizeof(AudioBuffer) * hardware_channels);
+
+ input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
+ input_buffer_list_->mNumberBuffers = hardware_channels;
+
+ input_bus_ = AudioBus::Create(hardware_channels, hardware_buffer_size_);
+ wrapper_bus_ = AudioBus::CreateWrapper(channels_);
+ if (hardware_channels != params_.input_channels()) {
+ ChannelLayout hardware_channel_layout =
+ GuessChannelLayout(hardware_channels);
+ ChannelLayout requested_channel_layout =
+ GuessChannelLayout(params_.input_channels());
+ channel_mixer_.reset(new ChannelMixer(hardware_channel_layout,
+ requested_channel_layout));
+ mixer_bus_ = AudioBus::Create(params_.input_channels(),
+ hardware_buffer_size_);
+ }
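+
+  // For example, a mono microphone with stereo |params_| produces a
+  // mono -> stereo ChannelMixer above; the input callback then upmixes
+  // |input_bus_| into |mixer_bus_| before pushing into the FIFO.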
+
+ // Allocate buffers for AudioBufferList.
+ UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
+ for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
+ input_buffer_list_->mBuffers[i].mNumberChannels = 1;
+ input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
+ input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
+ }
+}
+
+OSStatus AudioSynchronizedStream::HandleInputCallback(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");
+
+ if (first_input_time_ < 0.0)
+ first_input_time_ = time_stamp->mSampleTime;
+
+ // Get the new audio input data.
+ OSStatus result = AudioUnitRender(
+ input_unit_,
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ input_buffer_list_);
+
+  // TODO(xians): Add back the DCHECK after synchronized IO supports all
+  // combinations of input and output params. See http://issue/246521.
+ if (result != noErr)
+ return result;
+
+ // Buffer input into FIFO.
+ int available_frames = fifo_.max_frames() - fifo_.frames();
+ if (input_bus_->frames() <= available_frames) {
+ if (channel_mixer_) {
+ channel_mixer_->Transform(input_bus_.get(), mixer_bus_.get());
+ fifo_.Push(mixer_bus_.get());
+ } else {
+ fifo_.Push(input_bus_.get());
+ }
+ }
+
+ return result;
+}
+
+OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ // Create a wrapper bus on the AudioBufferList.
+ WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
+
+ if (fifo_.frames() < static_cast<int>(number_of_frames)) {
+ // We don't DCHECK here, since this is a possible run-time condition
+ // if the machine is bogged down.
+ wrapper_bus_->Zero();
+ return noErr;
+ }
+
+ // Read from the FIFO to feed the varispeed.
+ fifo_.Consume(wrapper_bus_.get(), 0, number_of_frames);
+
+ return noErr;
+}
+
+OSStatus AudioSynchronizedStream::HandleOutputCallback(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ // Input callback hasn't run yet or we've suddenly changed sample-rates
+ // -> silence.
+ if (first_input_time_ < 0.0 ||
+ static_cast<int>(number_of_frames) != params_.frames_per_buffer()) {
+ ZeroBufferList(io_data);
+ return noErr;
+ }
+
+ // Use the varispeed playback rate to offset small discrepancies
+ // in hardware clocks, and also any differences in sample-rate
+ // between input and output devices.
+
+ // Calculate a varispeed rate scalar factor to compensate for drift between
+ // input and output. We use the actual number of frames still in the FIFO
+ // compared with the ideal value of |target_fifo_frames_|.
+ int delta = fifo_.frames() - target_fifo_frames_;
+
+ // Average |delta| because it can jitter back/forth quite frequently
+ // by +/- the hardware buffer-size *if* the input and output callbacks are
+ // happening at almost exactly the same time. Also, if the input and output
+ // sample-rates are different then |delta| will jitter quite a bit due to
+ // the rate conversion happening in the varispeed, plus the jittering of
+ // the callbacks. The average value is what's important here.
+ average_delta_ += (delta - average_delta_) * 0.1;
+
+ // Compute a rate compensation which always attracts us back to the
+ // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
+ const double kCorrectionTimeSeconds = 0.1;
+ double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
+ fifo_rate_compensation_ =
+ (correction_time_frames + average_delta_) / correction_time_frames;
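+
+  // For example, at a 48 kHz output rate |correction_time_frames| is 4800;
+  // if |average_delta_| hovers around +48 frames, the compensation becomes
+  // (4800 + 48) / 4800 = 1.01, i.e. playback runs ~1% fast until the FIFO
+  // drains back toward |target_fifo_frames_|.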
+
+ // Adjust for FIFO drift.
+ OSStatus result = AudioUnitSetParameter(
+ varispeed_unit_,
+ kVarispeedParam_PlaybackRate,
+ kAudioUnitScope_Global,
+ 0,
+ fifo_rate_compensation_,
+ 0);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Render to the output using the varispeed.
+ result = AudioUnitRender(
+ varispeed_unit_,
+ io_action_flags,
+ time_stamp,
+ 0,
+ number_of_frames,
+ io_data);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+ if (result != noErr)
+ return result;
+
+ // Create a wrapper bus on the AudioBufferList.
+ WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
+
+ // Process in-place!
+ source_->OnMoreIOData(wrapper_bus_.get(),
+ wrapper_bus_.get(),
+ AudioBuffersState(0, 0));
+
+ return noErr;
+}
+
+OSStatus AudioSynchronizedStream::InputProc(
+ void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ AudioSynchronizedStream* stream =
+ static_cast<AudioSynchronizedStream*>(user_data);
+ DCHECK(stream);
+
+ return stream->HandleInputCallback(
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ io_data);
+}
+
+OSStatus AudioSynchronizedStream::VarispeedProc(
+ void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ AudioSynchronizedStream* stream =
+ static_cast<AudioSynchronizedStream*>(user_data);
+ DCHECK(stream);
+
+ return stream->HandleVarispeedCallback(
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ io_data);
+}
+
+OSStatus AudioSynchronizedStream::OutputProc(
+ void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ AudioSynchronizedStream* stream =
+ static_cast<AudioSynchronizedStream*>(user_data);
+ DCHECK(stream);
+
+ return stream->HandleOutputCallback(
+ io_action_flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ io_data);
+}
+
+void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
+ AudioDeviceID id, bool is_input) {
+ id_ = id;
+ is_input_ = is_input;
+ if (id_ == kAudioDeviceUnknown)
+ return;
+
+ UInt32 property_size = sizeof(buffer_size_frames_);
+
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ OSStatus result = AudioObjectGetPropertyData(
+ id_,
+ &pa,
+ 0,
+ 0,
+ &property_size,
+ &buffer_size_frames_);
+
+ OSSTATUS_DCHECK(result == noErr, result);
+}
+
+} // namespace media
diff --git a/media/audio/mac/audio_synchronized_mac.h b/media/audio/mac/audio_synchronized_mac.h
new file mode 100644
index 0000000..a6db48e
--- /dev/null
+++ b/media/audio/mac/audio_synchronized_mac.h
@@ -0,0 +1,216 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
+
+#include <AudioToolbox/AudioToolbox.h>
+#include <AudioUnit/AudioUnit.h>
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_fifo.h"
+
+namespace media {
+
+class AudioManagerMac;
+class ChannelMixer;
+
+// AudioSynchronizedStream allows arbitrary combinations of input and output
+// devices running off different clocks and using different drivers, with
+// potentially differing sample-rates. It implements AudioOutputStream
+// and shuttles its synchronized I/O data using AudioSourceCallback.
+//
+// The caller must first acquire the native sample rate of the selected
+// output device and then use that same rate when creating this object.
+//
+// ............................................................................
+// Theory of Operation:
+// .
+// INPUT THREAD . OUTPUT THREAD
+// +-----------------+ +------+ .
+// | Input AudioUnit | --> | | .
+// +-----------------+ | | .
+// | FIFO | .
+// | | +-----------+
+// | | -----> | Varispeed |
+// | | +-----------+
+// +------+ . |
+// . | +-----------+
+// . OnMoreIOData() --> | Output AU |
+// . +-----------+
+//
+// The input AudioUnit's InputProc is called on one thread which feeds the
+// FIFO. The output AudioUnit's OutputProc is called on a second thread
+// which pulls on the varispeed to get the current input data. The varispeed
+// handles mismatches between input and output sample-rate and also clock drift
+// between the input and output drivers. The varispeed consumes its data from
+// the FIFO and adjusts its rate dynamically according to the amount
+// of data buffered in the FIFO. If the FIFO starts buffering too much data,
+// the varispeed speeds up slightly to compensate; if it doesn't have enough
+// data buffered, the varispeed slows down slightly.
+//
+// Finally, once the input data is available, OnMoreIOData() is called with
+// this input and renders the output, which is then sent to the Output
+// AudioUnit.
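+//
+// A typical usage sketch (assuming valid device IDs and a source callback):
+//
+//   AudioSynchronizedStream* stream =
+//       new AudioSynchronizedStream(manager, params, input_id, output_id);
+//   if (stream->Open())
+//     stream->Start(source);  // Later: stream->Stop(); stream->Close();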
+class AudioSynchronizedStream : public AudioOutputStream {
+ public:
+  // The ctor takes all the usual parameters, plus |manager|, which is the
+  // audio manager creating this object.
+ AudioSynchronizedStream(AudioManagerMac* manager,
+ const AudioParameters& params,
+ AudioDeviceID input_id,
+ AudioDeviceID output_id);
+
+ virtual ~AudioSynchronizedStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ OSStatus SetInputDeviceAsCurrent(AudioDeviceID input_id);
+ OSStatus SetOutputDeviceAsCurrent(AudioDeviceID output_id);
+ AudioDeviceID GetInputDeviceID() { return input_info_.id_; }
+ AudioDeviceID GetOutputDeviceID() { return output_info_.id_; }
+
+ bool IsRunning();
+
+ private:
+ // Initialization.
+ OSStatus CreateAudioUnits();
+ OSStatus SetupInput(AudioDeviceID input_id);
+ OSStatus EnableIO();
+ OSStatus SetupOutput(AudioDeviceID output_id);
+ OSStatus SetupCallbacks();
+ OSStatus SetupStreamFormats();
+ void AllocateInputData();
+
+ // Handlers for the AudioUnit callbacks.
+ OSStatus HandleInputCallback(AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ OSStatus HandleVarispeedCallback(AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ OSStatus HandleOutputCallback(AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ // AudioUnit callbacks.
+ static OSStatus InputProc(void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ static OSStatus VarispeedProc(void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ static OSStatus OutputProc(void* user_data,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ // Our creator.
+ AudioManagerMac* manager_;
+
+ // Client parameters.
+ AudioParameters params_;
+
+ double input_sample_rate_;
+ double output_sample_rate_;
+
+ // Pointer to the object that will provide the audio samples.
+ AudioSourceCallback* source_;
+
+ // Values used in Open().
+ AudioDeviceID input_id_;
+ AudioDeviceID output_id_;
+
+ // The input AudioUnit renders its data here.
+ AudioBufferList* input_buffer_list_;
+
+ // Holds the actual data for |input_buffer_list_|.
+ scoped_ptr<AudioBus> input_bus_;
+
+ // Used to overlay AudioBufferLists.
+ scoped_ptr<AudioBus> wrapper_bus_;
+
+ class AudioDeviceInfo {
+ public:
+ AudioDeviceInfo()
+ : id_(kAudioDeviceUnknown),
+ is_input_(false),
+ buffer_size_frames_(0) {}
+    void Initialize(AudioDeviceID id, bool is_input);
+ bool IsInitialized() const { return id_ != kAudioDeviceUnknown; }
+
+ AudioDeviceID id_;
+ bool is_input_;
+ UInt32 buffer_size_frames_;
+ };
+
+ AudioDeviceInfo input_info_;
+ AudioDeviceInfo output_info_;
+
+ // Used for input to output buffering.
+ AudioFifo fifo_;
+
+ // The optimal number of frames we'd like to keep in the FIFO at all times.
+ int target_fifo_frames_;
+
+ // A running average of the measured delta between actual number of frames
+ // in the FIFO versus |target_fifo_frames_|.
+ double average_delta_;
+
+ // A varispeed rate scalar which is calculated based on FIFO drift.
+ double fifo_rate_compensation_;
+
+ // AudioUnits.
+ AudioUnit input_unit_;
+ AudioUnit varispeed_unit_;
+ AudioUnit output_unit_;
+
+ double first_input_time_;
+
+ bool is_running_;
+ int hardware_buffer_size_;
+ int channels_;
+
+  // Channel mixer used to transform mono to stereo data. It is only created
+  // if the hardware input is mono.
+ scoped_ptr<ChannelMixer> channel_mixer_;
+ scoped_ptr<AudioBus> mixer_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioSynchronizedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
diff --git a/media/audio/mac/audio_unified_mac.cc b/media/audio/mac/audio_unified_mac.cc
new file mode 100644
index 0000000..d1dc007
--- /dev/null
+++ b/media/audio/mac/audio_unified_mac.cc
@@ -0,0 +1,397 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_unified_mac.h"
+
+#include <CoreServices/CoreServices.h>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/mac/audio_manager_mac.h"
+
+namespace media {
+
+// TODO(crogers): support more than hard-coded stereo input.
+// Ideally we would like to receive this value as a constructor argument.
+static const int kDefaultInputChannels = 2;
+
+AudioHardwareUnifiedStream::AudioHardwareUnifiedStream(
+ AudioManagerMac* manager, const AudioParameters& params)
+ : manager_(manager),
+ source_(NULL),
+ client_input_channels_(kDefaultInputChannels),
+ volume_(1.0f),
+ input_channels_(0),
+ output_channels_(0),
+ input_channels_per_frame_(0),
+ output_channels_per_frame_(0),
+ io_proc_id_(0),
+ device_(kAudioObjectUnknown),
+ is_playing_(false) {
+ DCHECK(manager_);
+
+  // A frame is one sample across all channels. In interleaved audio the
+  // per-frame fields describe one set of n |channels|. In uncompressed
+  // audio, a packet is always one frame.
+ format_.mSampleRate = params.sample_rate();
+ format_.mFormatID = kAudioFormatLinearPCM;
+ format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
+ kLinearPCMFormatFlagIsSignedInteger;
+ format_.mBitsPerChannel = params.bits_per_sample();
+ format_.mChannelsPerFrame = params.channels();
+ format_.mFramesPerPacket = 1;
+ format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
+ format_.mBytesPerFrame = format_.mBytesPerPacket;
+ format_.mReserved = 0;
+
+ // Calculate the number of sample frames per callback.
+ number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
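+  // For example, 16-bit stereo gives mBytesPerPacket = (16 * 2) / 8 = 4, so
+  // a 1920-byte buffer yields 1920 / 4 = 480 frames (10 ms at 48 kHz).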
+
+ input_bus_ = AudioBus::Create(client_input_channels_,
+ params.frames_per_buffer());
+ output_bus_ = AudioBus::Create(params);
+}
+
+AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() {
+ DCHECK_EQ(device_, kAudioObjectUnknown);
+}
+
+bool AudioHardwareUnifiedStream::Open() {
+ // Obtain the current output device selected by the user.
+ AudioObjectPropertyAddress pa;
+ pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ pa.mScope = kAudioObjectPropertyScopeGlobal;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+
+ UInt32 size = sizeof(device_);
+
+ OSStatus result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &device_);
+
+ if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) {
+ LOG(ERROR) << "Cannot open unified AudioDevice.";
+ return false;
+ }
+
+ // The requested sample-rate must match the hardware sample-rate.
+ Float64 sample_rate = 0.0;
+ size = sizeof(sample_rate);
+
+ pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+ pa.mScope = kAudioObjectPropertyScopeWildcard;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+
+ result = AudioObjectGetPropertyData(
+ device_,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &sample_rate);
+
+ if (result != noErr || sample_rate != format_.mSampleRate) {
+ LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate
+ << " must match the hardware sample-rate: " << sample_rate;
+ return false;
+ }
+
+ // Configure buffer frame size.
+ UInt32 frame_size = number_of_frames_;
+
+ pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+ pa.mScope = kAudioDevicePropertyScopeInput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+ result = AudioObjectSetPropertyData(
+ device_,
+ &pa,
+ 0,
+ 0,
+ sizeof(frame_size),
+ &frame_size);
+
+ if (result != noErr) {
+ LOG(ERROR) << "Unable to set input buffer frame size: " << frame_size;
+ return false;
+ }
+
+ pa.mScope = kAudioDevicePropertyScopeOutput;
+ result = AudioObjectSetPropertyData(
+ device_,
+ &pa,
+ 0,
+ 0,
+ sizeof(frame_size),
+ &frame_size);
+
+ if (result != noErr) {
+ LOG(ERROR) << "Unable to set output buffer frame size: " << frame_size;
+ return false;
+ }
+
+ DVLOG(1) << "Sample rate: " << sample_rate;
+ DVLOG(1) << "Frame size: " << frame_size;
+
+ // Determine the number of input and output channels.
+ // We handle both the interleaved and non-interleaved cases.
+
+ // Get input stream configuration.
+ pa.mSelector = kAudioDevicePropertyStreamConfiguration;
+ pa.mScope = kAudioDevicePropertyScopeInput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+
+ result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr && size > 0) {
+ // Allocate storage.
+ scoped_ptr<uint8[]> input_list_storage(new uint8[size]);
+ AudioBufferList& input_list =
+ *reinterpret_cast<AudioBufferList*>(input_list_storage.get());
+
+ result = AudioObjectGetPropertyData(
+ device_,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &input_list);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr) {
+ // Determine number of input channels.
+ input_channels_per_frame_ = input_list.mNumberBuffers > 0 ?
+ input_list.mBuffers[0].mNumberChannels : 0;
+ if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) {
+ // Non-interleaved.
+ input_channels_ = input_list.mNumberBuffers;
+ } else {
+ // Interleaved.
+ input_channels_ = input_channels_per_frame_;
+ }
+ }
+ }
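+
+  // For example, a stereo device exposing one AudioBuffer with
+  // mNumberChannels == 2 is interleaved, whereas two buffers with
+  // mNumberChannels == 1 each indicate the non-interleaved layout.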
+
+ DVLOG(1) << "Input channels: " << input_channels_;
+ DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_;
+
+ // The hardware must have at least the requested input channels.
+ if (result != noErr || client_input_channels_ > input_channels_) {
+ LOG(ERROR) << "AudioDevice does not support requested input channels.";
+ return false;
+ }
+
+ // Get output stream configuration.
+ pa.mSelector = kAudioDevicePropertyStreamConfiguration;
+ pa.mScope = kAudioDevicePropertyScopeOutput;
+ pa.mElement = kAudioObjectPropertyElementMaster;
+
+ result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr && size > 0) {
+ // Allocate storage.
+ scoped_ptr<uint8[]> output_list_storage(new uint8[size]);
+ AudioBufferList& output_list =
+ *reinterpret_cast<AudioBufferList*>(output_list_storage.get());
+
+ result = AudioObjectGetPropertyData(
+ device_,
+ &pa,
+ 0,
+ 0,
+ &size,
+ &output_list);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr) {
+      // Determine number of output channels, guarding against an empty
+      // buffer list as in the input path above.
+      output_channels_per_frame_ = output_list.mNumberBuffers > 0 ?
+          output_list.mBuffers[0].mNumberChannels : 0;
+ if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) {
+ // Non-interleaved.
+ output_channels_ = output_list.mNumberBuffers;
+ } else {
+ // Interleaved.
+ output_channels_ = output_channels_per_frame_;
+ }
+ }
+ }
+
+ DVLOG(1) << "Output channels: " << output_channels_;
+ DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_;
+
+ // The hardware must have at least the requested output channels.
+ if (result != noErr ||
+ output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) {
+ LOG(ERROR) << "AudioDevice does not support requested output channels.";
+ return false;
+ }
+
+ // Setup the I/O proc.
+ result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_);
+ if (result != noErr) {
+ LOG(ERROR) << "Error creating IOProc.";
+ return false;
+ }
+
+ return true;
+}
+
+void AudioHardwareUnifiedStream::Close() {
+ DCHECK(!is_playing_);
+
+ OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ io_proc_id_ = 0;
+ device_ = kAudioObjectUnknown;
+
+ // Inform the audio manager that we have been closed. This can cause our
+ // destruction.
+ manager_->ReleaseOutputStream(this);
+}
+
+void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) {
+ DCHECK(callback);
+ DCHECK_NE(device_, kAudioObjectUnknown);
+ DCHECK(!is_playing_);
+ if (device_ == kAudioObjectUnknown || is_playing_)
+ return;
+
+ source_ = callback;
+
+ OSStatus result = AudioDeviceStart(device_, io_proc_id_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr)
+ is_playing_ = true;
+}
+
+void AudioHardwareUnifiedStream::Stop() {
+ if (!is_playing_)
+ return;
+
+ if (device_ != kAudioObjectUnknown) {
+ OSStatus result = AudioDeviceStop(device_, io_proc_id_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ }
+
+ is_playing_ = false;
+ source_ = NULL;
+}
+
+void AudioHardwareUnifiedStream::SetVolume(double volume) {
+ volume_ = static_cast<float>(volume);
+ // TODO(crogers): set volume property
+}
+
+void AudioHardwareUnifiedStream::GetVolume(double* volume) {
+ *volume = volume_;
+}
+
+// Pulls on our provider with optional input, asking it to render output.
+// Note to future hackers of this function: Do not add locks here because this
+// is running on a real-time thread (for low-latency).
+OSStatus AudioHardwareUnifiedStream::Render(
+ AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time) {
+ // Convert the input data accounting for possible interleaving.
+ // TODO(crogers): it's better to simply memcpy() if source is already planar.
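+  // For interleaved stereo input, for example, channel 0 reads samples
+  // 0, 2, 4, ... and channel 1 reads samples 1, 3, 5, ... of mBuffers[0];
+  // in the non-interleaved case each channel has its own buffer and the
+  // stride is 1.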
+ if (input_channels_ >= client_input_channels_) {
+ for (int channel_index = 0; channel_index < client_input_channels_;
+ ++channel_index) {
+ float* source;
+
+ int source_channel_index = channel_index;
+
+ if (input_channels_per_frame_ > 1) {
+ // Interleaved.
+ source = static_cast<float*>(input_data->mBuffers[0].mData) +
+ source_channel_index;
+ } else {
+ // Non-interleaved.
+ source = static_cast<float*>(
+ input_data->mBuffers[source_channel_index].mData);
+ }
+
+ float* p = input_bus_->channel(channel_index);
+ for (int i = 0; i < number_of_frames_; ++i) {
+ p[i] = *source;
+ source += input_channels_per_frame_;
+ }
+ }
+ } else if (input_channels_) {
+ input_bus_->Zero();
+ }
+
+ // Give the client optional input data and have it render the output data.
+ source_->OnMoreIOData(input_bus_.get(),
+ output_bus_.get(),
+ AudioBuffersState(0, 0));
+
+ // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio.
+
+ // Handle interleaving as necessary.
+ // TODO(crogers): it's better to simply memcpy() if dest is already planar.
+
+ for (int channel_index = 0;
+ channel_index < static_cast<int>(format_.mChannelsPerFrame);
+ ++channel_index) {
+ float* dest;
+
+ int dest_channel_index = channel_index;
+
+ if (output_channels_per_frame_ > 1) {
+ // Interleaved.
+ dest = static_cast<float*>(output_data->mBuffers[0].mData) +
+ dest_channel_index;
+ } else {
+ // Non-interleaved.
+ dest = static_cast<float*>(
+ output_data->mBuffers[dest_channel_index].mData);
+ }
+
+ float* p = output_bus_->channel(channel_index);
+ for (int i = 0; i < number_of_frames_; ++i) {
+ *dest = p[i];
+ dest += output_channels_per_frame_;
+ }
+ }
+
+ return noErr;
+}
+
+OSStatus AudioHardwareUnifiedStream::RenderProc(
+ AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time,
+ void* user_data) {
+ AudioHardwareUnifiedStream* audio_output =
+ static_cast<AudioHardwareUnifiedStream*>(user_data);
+ DCHECK(audio_output);
+ if (!audio_output)
+ return -1;
+
+ return audio_output->Render(
+ device,
+ now,
+ input_data,
+ input_time,
+ output_data,
+ output_time);
+}
+
+} // namespace media
diff --git a/media/audio/mac/audio_unified_mac.h b/media/audio/mac/audio_unified_mac.h
new file mode 100644
index 0000000..ff090e3
--- /dev/null
+++ b/media/audio/mac/audio_unified_mac.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
+
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerMac;
+
+// Implementation of AudioOutputStream for Mac OS X using the
+// CoreAudio AudioHardware API suitable for low-latency unified audio I/O
+// when using devices which support *both* input and output
+// in the same driver. This is the case with professional
+// USB and FireWire devices.
+//
+// Note that the caller must first get the native sample-rate of the default
+// output device and use that sample-rate when creating this object.
+class AudioHardwareUnifiedStream : public AudioOutputStream {
+ public:
+  // The ctor takes all the usual parameters, plus |manager|, which is the
+  // audio manager creating this object.
+ AudioHardwareUnifiedStream(AudioManagerMac* manager,
+ const AudioParameters& params);
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioOutputStream::Close().
+ virtual ~AudioHardwareUnifiedStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ int input_channels() const { return input_channels_; }
+ int output_channels() const { return output_channels_; }
+
+ private:
+ OSStatus Render(AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time);
+
+ static OSStatus RenderProc(AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time,
+ void* user_data);
+
+  // Our creator; the audio manager needs to be notified when we close.
+ AudioManagerMac* manager_;
+
+ // Pointer to the object that will provide the audio samples.
+ AudioSourceCallback* source_;
+
+  // Structure that holds the stream format details such as the sample-rate
+  // and bits per channel.
+ AudioStreamBasicDescription format_;
+
+  // Hardware buffer size in sample frames.
+ int number_of_frames_;
+
+ // Number of audio channels provided to the client via OnMoreIOData().
+ int client_input_channels_;
+
+ // Volume level from 0 to 1.
+ float volume_;
+
+ // Number of input and output channels queried from the hardware.
+ int input_channels_;
+ int output_channels_;
+ int input_channels_per_frame_;
+ int output_channels_per_frame_;
+
+ AudioDeviceIOProcID io_proc_id_;
+ AudioDeviceID device_;
+ bool is_playing_;
+
+ // Intermediate buffers used with call to OnMoreIOData().
+ scoped_ptr<AudioBus> input_bus_;
+ scoped_ptr<AudioBus> output_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioHardwareUnifiedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
diff --git a/media/audio/mock_audio_manager.cc b/media/audio/mock_audio_manager.cc
index 318bf09..5984790 100644
--- a/media/audio/mock_audio_manager.cc
+++ b/media/audio/mock_audio_manager.cc
@@ -47,14 +47,16 @@ void MockAudioManager::GetAudioOutputDeviceNames(
media::AudioOutputStream* MockAudioManager::MakeAudioOutputStream(
const media::AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
NOTREACHED();
return NULL;
}
media::AudioOutputStream* MockAudioManager::MakeAudioOutputStreamProxy(
const media::AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
NOTREACHED();
return NULL;
}
diff --git a/media/audio/mock_audio_manager.h b/media/audio/mock_audio_manager.h
index 8ca4009..0cd79e7 100644
--- a/media/audio/mock_audio_manager.h
+++ b/media/audio/mock_audio_manager.h
@@ -40,11 +40,13 @@ class MockAudioManager : public media::AudioManager {
virtual media::AudioOutputStream* MakeAudioOutputStream(
const media::AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual media::AudioOutputStream* MakeAudioOutputStreamProxy(
const media::AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual media::AudioInputStream* MakeAudioInputStream(
const media::AudioParameters& params,
diff --git a/media/audio/openbsd/audio_manager_openbsd.cc b/media/audio/openbsd/audio_manager_openbsd.cc
index 141689a..b378b02 100644
--- a/media/audio/openbsd/audio_manager_openbsd.cc
+++ b/media/audio/openbsd/audio_manager_openbsd.cc
@@ -92,7 +92,8 @@ AudioOutputStream* AudioManagerOpenBSD::MakeLinearOutputStream(
AudioOutputStream* AudioManagerOpenBSD::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
return MakeOutputStream(params);
diff --git a/media/audio/openbsd/audio_manager_openbsd.h b/media/audio/openbsd/audio_manager_openbsd.h
index 53b7dfb..113f591 100644
--- a/media/audio/openbsd/audio_manager_openbsd.h
+++ b/media/audio/openbsd/audio_manager_openbsd.h
@@ -27,7 +27,8 @@ class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
diff --git a/media/audio/pulse/audio_manager_pulse.cc b/media/audio/pulse/audio_manager_pulse.cc
index ea328ad..da106c2 100644
--- a/media/audio/pulse/audio_manager_pulse.cc
+++ b/media/audio/pulse/audio_manager_pulse.cc
@@ -16,6 +16,7 @@
#include "media/audio/audio_parameters.h"
#include "media/audio/pulse/pulse_input.h"
#include "media/audio/pulse/pulse_output.h"
+#include "media/audio/pulse/pulse_unified.h"
#include "media/audio/pulse/pulse_util.h"
#include "media/base/channel_layout.h"
@@ -132,15 +133,16 @@ AudioParameters AudioManagerPulse::GetInputStreamParameters(
AudioOutputStream* AudioManagerPulse::MakeLinearOutputStream(
const AudioParameters& params) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return MakeOutputStream(params);
+ return MakeOutputStream(params, std::string());
}
AudioOutputStream* AudioManagerPulse::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- return MakeOutputStream(params);
+ return MakeOutputStream(params, input_device_id);
}
AudioInputStream* AudioManagerPulse::MakeLinearInputStream(
@@ -187,7 +189,11 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
}
AudioOutputStream* AudioManagerPulse::MakeOutputStream(
- const AudioParameters& params) {
+ const AudioParameters& params, const std::string& input_device_id) {
+ if (params.input_channels()) {
+ return new PulseAudioUnifiedStream(params, input_device_id, this);
+ }
+
return new PulseAudioOutputStream(params, this);
}
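
The hunk above restores the dispatch that selects the unified, synchronized capture-plus-render stream whenever the caller's AudioParameters carry an input channel count. A hypothetical call site, assuming the AudioParameters constructor overload that takes input_channels (only the unified-IO paths use it):

    // Hypothetical call site; all values are illustrative. This assumes the
    // AudioParameters constructor overload that carries an input channel
    // count, which only the unified-IO paths use.
    media::AudioParameters params(
        media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
        media::CHANNEL_LAYOUT_STEREO,
        2 /* input_channels */,
        48000 /* sample_rate */, 16 /* bits */, 480 /* frames_per_buffer */);
    media::AudioOutputStream* stream = audio_manager->MakeAudioOutputStream(
        params, std::string() /* device_id */, input_device_id);

With input_channels() == 0 the same call falls through to a plain PulseAudioOutputStream.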
diff --git a/media/audio/pulse/audio_manager_pulse.h b/media/audio/pulse/audio_manager_pulse.h
index b3b0031..45fb8cb 100644
--- a/media/audio/pulse/audio_manager_pulse.h
+++ b/media/audio/pulse/audio_manager_pulse.h
@@ -37,7 +37,8 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
@@ -69,7 +70,8 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
void* user_data);
// Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
- AudioOutputStream* MakeOutputStream(const AudioParameters& params);
+ AudioOutputStream* MakeOutputStream(const AudioParameters& params,
+ const std::string& input_device_id);
// Called by MakeLinearInputStream and MakeLowLatencyInputStream.
AudioInputStream* MakeInputStream(const AudioParameters& params,
diff --git a/media/audio/pulse/pulse_unified.cc b/media/audio/pulse/pulse_unified.cc
new file mode 100644
index 0000000..cd17b01
--- /dev/null
+++ b/media/audio/pulse/pulse_unified.cc
@@ -0,0 +1,292 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/pulse/pulse_unified.h"
+
+#include "base/single_thread_task_runner.h"
+#include "base/time/time.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/pulse/pulse_util.h"
+#include "media/base/seekable_buffer.h"
+
+namespace media {
+
+using pulse::AutoPulseLock;
+using pulse::WaitForOperationCompletion;
+
+static const int kFifoSizeInPackets = 10;
+
+// static, pa_stream_notify_cb
+void PulseAudioUnifiedStream::StreamNotifyCallback(pa_stream* s,
+ void* user_data) {
+ PulseAudioUnifiedStream* stream =
+ static_cast<PulseAudioUnifiedStream*>(user_data);
+
+ // Forward unexpected failures to the AudioSourceCallback if available. All
+ // these variables are only modified under pa_threaded_mainloop_lock() so this
+ // should be thread safe.
+ if (s && stream->source_callback_ &&
+ pa_stream_get_state(s) == PA_STREAM_FAILED) {
+ stream->source_callback_->OnError(stream);
+ }
+
+ pa_threaded_mainloop_signal(stream->pa_mainloop_, 0);
+}
+
+// static, used by pa_stream_set_read_callback.
+void PulseAudioUnifiedStream::ReadCallback(pa_stream* handle, size_t length,
+ void* user_data) {
+ static_cast<PulseAudioUnifiedStream*>(user_data)->ReadData();
+}
+
+PulseAudioUnifiedStream::PulseAudioUnifiedStream(
+ const AudioParameters& params,
+ const std::string& input_device_id,
+ AudioManagerBase* manager)
+ : params_(params),
+ input_device_id_(input_device_id),
+ manager_(manager),
+ pa_context_(NULL),
+ pa_mainloop_(NULL),
+ input_stream_(NULL),
+ output_stream_(NULL),
+ volume_(1.0f),
+ source_callback_(NULL) {
+ DCHECK(manager_->GetTaskRunner()->BelongsToCurrentThread());
+ CHECK(params_.IsValid());
+ input_bus_ = AudioBus::Create(params_);
+ output_bus_ = AudioBus::Create(params_);
+}
+
+PulseAudioUnifiedStream::~PulseAudioUnifiedStream() {
+ // All internal structures should already have been freed in Close(), which
+ // calls AudioManagerBase::ReleaseOutputStream() which deletes this object.
+ DCHECK(!input_stream_);
+ DCHECK(!output_stream_);
+ DCHECK(!pa_context_);
+ DCHECK(!pa_mainloop_);
+}
+
+bool PulseAudioUnifiedStream::Open() {
+ DCHECK(manager_->GetTaskRunner()->BelongsToCurrentThread());
+ // Prepare the recording buffers for the callbacks.
+ fifo_.reset(new media::SeekableBuffer(
+ 0, kFifoSizeInPackets * params_.GetBytesPerBuffer()));
+ input_data_buffer_.reset(new uint8[params_.GetBytesPerBuffer()]);
+
+ if (!pulse::CreateOutputStream(&pa_mainloop_, &pa_context_, &output_stream_,
+ params_, &StreamNotifyCallback, NULL, this))
+ return false;
+
+ if (!pulse::CreateInputStream(pa_mainloop_, pa_context_, &input_stream_,
+ params_, input_device_id_,
+ &StreamNotifyCallback, this))
+ return false;
+
+ DCHECK(pa_mainloop_);
+ DCHECK(pa_context_);
+ DCHECK(input_stream_);
+ DCHECK(output_stream_);
+ return true;
+}
+
+void PulseAudioUnifiedStream::Reset() {
+ if (!pa_mainloop_) {
+ DCHECK(!input_stream_);
+ DCHECK(!output_stream_);
+ DCHECK(!pa_context_);
+ return;
+ }
+
+ {
+ AutoPulseLock auto_lock(pa_mainloop_);
+
+ // Close the input stream.
+ if (input_stream_) {
+ // Disable all the callbacks before disconnecting.
+ pa_stream_set_state_callback(input_stream_, NULL, NULL);
+ pa_stream_flush(input_stream_, NULL, NULL);
+ pa_stream_disconnect(input_stream_);
+
+ // Release PulseAudio structures.
+ pa_stream_unref(input_stream_);
+ input_stream_ = NULL;
+ }
+
+    // Close the output stream.
+ if (output_stream_) {
+ // Release PulseAudio output stream structures.
+ pa_stream_set_state_callback(output_stream_, NULL, NULL);
+ pa_stream_disconnect(output_stream_);
+ pa_stream_unref(output_stream_);
+ output_stream_ = NULL;
+ }
+
+ if (pa_context_) {
+ pa_context_disconnect(pa_context_);
+ pa_context_set_state_callback(pa_context_, NULL, NULL);
+ pa_context_unref(pa_context_);
+ pa_context_ = NULL;
+ }
+ }
+
+ pa_threaded_mainloop_stop(pa_mainloop_);
+ pa_threaded_mainloop_free(pa_mainloop_);
+ pa_mainloop_ = NULL;
+}
+
+void PulseAudioUnifiedStream::Close() {
+ DCHECK(manager_->GetTaskRunner()->BelongsToCurrentThread());
+ Reset();
+
+ // Signal to the manager that we're closed and can be removed.
+ // This should be the last call in the function as it deletes "this".
+ manager_->ReleaseOutputStream(this);
+}
+
+void PulseAudioUnifiedStream::WriteData(size_t requested_bytes) {
+ CHECK_EQ(requested_bytes, static_cast<size_t>(params_.GetBytesPerBuffer()));
+
+ void* buffer = NULL;
+ int frames_filled = 0;
+ if (source_callback_) {
+ CHECK_GE(pa_stream_begin_write(
+ output_stream_, &buffer, &requested_bytes), 0);
+ uint32 hardware_delay = pulse::GetHardwareLatencyInBytes(
+ output_stream_, params_.sample_rate(),
+ params_.GetBytesPerFrame());
+ fifo_->Read(input_data_buffer_.get(), requested_bytes);
+ input_bus_->FromInterleaved(
+ input_data_buffer_.get(), params_.frames_per_buffer(), 2);
+
+ frames_filled = source_callback_->OnMoreIOData(
+ input_bus_.get(),
+ output_bus_.get(),
+ AudioBuffersState(0, hardware_delay));
+ }
+
+ // Zero the unfilled data so it plays back as silence.
+ if (frames_filled < output_bus_->frames()) {
+ output_bus_->ZeroFramesPartial(
+ frames_filled, output_bus_->frames() - frames_filled);
+ }
+
+ // Note: If this ever changes to output raw float the data must be clipped
+ // and sanitized since it may come from an untrusted source such as NaCl.
+ output_bus_->Scale(volume_);
+ output_bus_->ToInterleaved(
+ output_bus_->frames(), params_.bits_per_sample() / 8, buffer);
+
+ if (pa_stream_write(output_stream_, buffer, requested_bytes, NULL, 0LL,
+ PA_SEEK_RELATIVE) < 0) {
+ if (source_callback_) {
+ source_callback_->OnError(this);
+ }
+ }
+}
+
+void PulseAudioUnifiedStream::ReadData() {
+ do {
+ size_t length = 0;
+ const void* data = NULL;
+ pa_stream_peek(input_stream_, &data, &length);
+ if (!data || length == 0)
+ break;
+
+ fifo_->Append(reinterpret_cast<const uint8*>(data), length);
+
+ // Deliver the recording data to the renderer and drive the playout.
+ int packet_size = params_.GetBytesPerBuffer();
+ while (fifo_->forward_bytes() >= packet_size) {
+ WriteData(packet_size);
+ }
+
+    // Drop the peeked fragment; the loop condition checks for more data.
+ pa_stream_drop(input_stream_);
+ } while (pa_stream_readable_size(input_stream_) > 0);
+
+ pa_threaded_mainloop_signal(pa_mainloop_, 0);
+}
+
+void PulseAudioUnifiedStream::Start(AudioSourceCallback* callback) {
+ DCHECK(manager_->GetTaskRunner()->BelongsToCurrentThread());
+ CHECK(callback);
+ CHECK(input_stream_);
+ CHECK(output_stream_);
+ AutoPulseLock auto_lock(pa_mainloop_);
+
+  // Ensure the context and both streams are ready; bail out if any is not.
+  if (pa_context_get_state(pa_context_) != PA_CONTEXT_READY ||
+      pa_stream_get_state(output_stream_) != PA_STREAM_READY ||
+      pa_stream_get_state(input_stream_) != PA_STREAM_READY) {
+ callback->OnError(this);
+ return;
+ }
+
+ source_callback_ = callback;
+
+ fifo_->Clear();
+
+ // Uncork (resume) the input stream.
+ pa_stream_set_read_callback(input_stream_, &ReadCallback, this);
+ pa_stream_readable_size(input_stream_);
+ pa_operation* operation = pa_stream_cork(input_stream_, 0, NULL, NULL);
+ WaitForOperationCompletion(pa_mainloop_, operation);
+
+ // Uncork (resume) the output stream.
+ // We use the recording stream to drive the playback, so we do not need to
+ // register the write callback using pa_stream_set_write_callback().
+ operation = pa_stream_cork(output_stream_, 0,
+ &pulse::StreamSuccessCallback, pa_mainloop_);
+ WaitForOperationCompletion(pa_mainloop_, operation);
+}
+
+void PulseAudioUnifiedStream::Stop() {
+ DCHECK(manager_->GetTaskRunner()->BelongsToCurrentThread());
+
+ // Cork (pause) the stream. Waiting for the main loop lock will ensure
+ // outstanding callbacks have completed.
+ AutoPulseLock auto_lock(pa_mainloop_);
+
+  // Set |source_callback_| to NULL so that no further OnMoreIOData() calls
+  // are made while we wait on the flush and cork operations below.
+ source_callback_ = NULL;
+
+ // Set the read callback to NULL before flushing the stream, otherwise it
+ // will cause deadlock on the operation.
+ pa_stream_set_read_callback(input_stream_, NULL, NULL);
+ pa_operation* operation = pa_stream_flush(
+ input_stream_, &pulse::StreamSuccessCallback, pa_mainloop_);
+ WaitForOperationCompletion(pa_mainloop_, operation);
+
+ operation = pa_stream_cork(input_stream_, 1, &pulse::StreamSuccessCallback,
+ pa_mainloop_);
+ WaitForOperationCompletion(pa_mainloop_, operation);
+
+  // Flush the stream prior to corking it; flushing afterwards causes hangs.
+  // Write callbacks are suspended while inside pa_threaded_mainloop_lock(),
+  // so this is all thread safe.
+ operation = pa_stream_flush(
+ output_stream_, &pulse::StreamSuccessCallback, pa_mainloop_);
+ WaitForOperationCompletion(pa_mainloop_, operation);
+
+ operation = pa_stream_cork(output_stream_, 1, &pulse::StreamSuccessCallback,
+ pa_mainloop_);
+ WaitForOperationCompletion(pa_mainloop_, operation);
+}
+
+void PulseAudioUnifiedStream::SetVolume(double volume) {
+ DCHECK(manager_->GetTaskRunner()->BelongsToCurrentThread());
+
+ volume_ = static_cast<float>(volume);
+}
+
+void PulseAudioUnifiedStream::GetVolume(double* volume) {
+ DCHECK(manager_->GetTaskRunner()->BelongsToCurrentThread());
+
+ *volume = volume_;
+}
+
+} // namespace media
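
The new file above drives playout from the capture side: ReadData() appends each recorded fragment to a SeekableBuffer FIFO and calls WriteData() once per fully buffered packet, so the output stream needs no write callback of its own. A minimal, self-contained sketch of that pattern (std::deque stands in for media::SeekableBuffer; nothing here is Chromium code):

    // Self-contained model of the input-drives-output buffering used by
    // PulseAudioUnifiedStream. The "render" step just produces silence
    // where OnMoreIOData() would run. Illustrative only.
    #include <cstdint>
    #include <deque>
    #include <vector>

    class UnifiedFifoModel {
     public:
      explicit UnifiedFifoModel(size_t packet_bytes)
          : packet_bytes_(packet_bytes) {}

      // Mirrors ReadData(): buffer the captured fragment, then emit one
      // output packet per full input packet.
      void OnCaptured(const uint8_t* data, size_t len) {
        fifo_.insert(fifo_.end(), data, data + len);
        while (fifo_.size() >= packet_bytes_)
          RenderOnePacket();
      }

     private:
      // Mirrors WriteData(): consume exactly one packet of input and
      // produce exactly one packet of output.
      void RenderOnePacket() {
        std::vector<uint8_t> input(fifo_.begin(),
                                   fifo_.begin() + packet_bytes_);
        fifo_.erase(fifo_.begin(), fifo_.begin() + packet_bytes_);
        std::vector<uint8_t> output(packet_bytes_, 0);  // Silence.
        // A real implementation hands |input| and |output| to the source
        // callback here and writes |output| to the playback stream.
      }

      const size_t packet_bytes_;
      std::deque<uint8_t> fifo_;
    };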
diff --git a/media/audio/pulse/pulse_unified.h b/media/audio/pulse/pulse_unified.h
new file mode 100644
index 0000000..a800d09
--- /dev/null
+++ b/media/audio/pulse/pulse_unified.h
@@ -0,0 +1,90 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
+#define MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
+
+#include <pulse/pulseaudio.h>
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_fifo.h"
+
+namespace media {
+
+class AudioManagerBase;
+class SeekableBuffer;
+
+class PulseAudioUnifiedStream : public AudioOutputStream {
+ public:
+ PulseAudioUnifiedStream(const AudioParameters& params,
+ const std::string& input_device_id,
+ AudioManagerBase* manager);
+
+ virtual ~PulseAudioUnifiedStream();
+
+  // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ private:
+  // Called by PulseAudio when a stream changes state. If an unexpected
+  // failure state change happens and |source_callback_| is set, this
+  // method forwards the error via OnError().
+ static void StreamNotifyCallback(pa_stream* s, void* user_data);
+
+ // Called by PulseAudio recording stream when it has data.
+ static void ReadCallback(pa_stream* s, size_t length, void* user_data);
+
+ // Helpers for ReadCallback() to read and write data.
+ void WriteData(size_t requested_bytes);
+ void ReadData();
+
+ // Close() helper function to free internal structs.
+ void Reset();
+
+ // AudioParameters from the constructor.
+ const AudioParameters params_;
+
+ // Device unique ID of the input device.
+ const std::string input_device_id_;
+
+ // Audio manager that created us. Used to report that we've closed.
+ AudioManagerBase* manager_;
+
+ // PulseAudio API structs.
+ pa_context* pa_context_;
+ pa_threaded_mainloop* pa_mainloop_;
+ pa_stream* input_stream_;
+ pa_stream* output_stream_;
+
+ // Float representation of volume from 0.0 to 1.0.
+ float volume_;
+
+ // Callback to audio data source. Must only be modified while holding a lock
+ // on |pa_mainloop_| via pa_threaded_mainloop_lock().
+ AudioSourceCallback* source_callback_;
+
+ scoped_ptr<AudioBus> input_bus_;
+ scoped_ptr<AudioBus> output_bus_;
+
+ // Used for input to output buffering.
+ scoped_ptr<media::SeekableBuffer> fifo_;
+
+  // Temporary storage for recorded data. It receives a packet of data from
+  // |fifo_| and delivers it to the OnMoreIOData() callback.
+ scoped_ptr<uint8[]> input_data_buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(PulseAudioUnifiedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
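
Every member the PulseAudio callbacks touch, |source_callback_| in particular, is only mutated while holding the threaded-mainloop lock. A stand-alone sketch of the RAII guard this relies on (the real one is pulse::AutoPulseLock in media/audio/pulse/pulse_util.h; this version is illustrative only):

    // Stand-alone sketch of the RAII lock the stream relies on; the real
    // guard is pulse::AutoPulseLock in media/audio/pulse/pulse_util.h.
    #include <pulse/pulseaudio.h>

    class ScopedMainloopLock {
     public:
      explicit ScopedMainloopLock(pa_threaded_mainloop* mainloop)
          : mainloop_(mainloop) {
        pa_threaded_mainloop_lock(mainloop_);
      }
      ~ScopedMainloopLock() { pa_threaded_mainloop_unlock(mainloop_); }

     private:
      pa_threaded_mainloop* const mainloop_;
    };

Holding the lock also suspends the mainloop's callback dispatch, which is why Stop() can safely clear |source_callback_| and flush both streams under it.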
diff --git a/media/audio/sounds/audio_stream_handler.cc b/media/audio/sounds/audio_stream_handler.cc
index 9452f04..f286356 100644
--- a/media/audio/sounds/audio_stream_handler.cc
+++ b/media/audio/sounds/audio_stream_handler.cc
@@ -59,7 +59,7 @@ class AudioStreamHandler::AudioStreamContainer
p.bits_per_sample(),
kDefaultFrameCount);
stream_ = AudioManager::Get()->MakeAudioOutputStreamProxy(
- params, std::string());
+ params, std::string(), std::string());
if (!stream_ || !stream_->Open()) {
LOG(ERROR) << "Failed to open an output stream.";
return;
diff --git a/media/audio/win/audio_low_latency_output_win_unittest.cc b/media/audio/win/audio_low_latency_output_win_unittest.cc
index c2b4d97..66d3891 100644
--- a/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ b/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -224,7 +224,7 @@ class AudioOutputStreamWrapper {
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(
AudioParameters(format_, channel_layout_, sample_rate_,
bits_per_sample_, samples_per_packet_),
- std::string());
+ std::string(), std::string());
EXPECT_TRUE(aos);
return aos;
}
diff --git a/media/audio/win/audio_manager_win.cc b/media/audio/win/audio_manager_win.cc
index a8678cd..ec963a7 100644
--- a/media/audio/win/audio_manager_win.cc
+++ b/media/audio/win/audio_manager_win.cc
@@ -26,6 +26,7 @@
#include "media/audio/win/audio_low_latency_input_win.h"
#include "media/audio/win/audio_low_latency_output_win.h"
#include "media/audio/win/audio_manager_win.h"
+#include "media/audio/win/audio_unified_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/audio/win/device_enumeration_win.h"
#include "media/audio/win/wavein_input_win.h"
@@ -352,7 +353,8 @@ AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
// - WASAPIAudioOutputStream: Based on Core Audio (WASAPI) API.
AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) {
+ const std::string& device_id,
+ const std::string& input_device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
if (params.channels() > kWinMaxChannels)
return NULL;
@@ -367,6 +369,15 @@ AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
this, params, NumberOfWaveOutBuffers(), WAVE_MAPPER);
}
+ // TODO(rtoy): support more than stereo input.
+ if (params.input_channels() > 0) {
+ DVLOG(1) << "WASAPIUnifiedStream is created.";
+ DLOG_IF(ERROR, !device_id.empty() &&
+ device_id != AudioManagerBase::kDefaultDeviceId)
+ << "Opening by device id not supported by WASAPIUnifiedStream";
+ return new WASAPIUnifiedStream(this, params, input_device_id);
+ }
+
// Pass an empty string to indicate that we want the default device
// since we consistently only check for an empty string in
// WASAPIAudioOutputStream.
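
For orientation, a condensed sketch of the dispatch the hunk above restores; the non-unified fall-through is elided and this is not the complete function:

    // Condensed view of the restored dispatch; the non-unified fall-through
    // (WASAPIAudioOutputStream, PCMWaveOutAudioOutputStream) is elided and
    // this is not the complete function.
    AudioOutputStream* MakeLowLatencyOutputStreamSketch(
        AudioManagerWin* manager,
        const AudioParameters& params,
        const std::string& device_id,
        const std::string& input_device_id) {
      if (params.input_channels() > 0) {
        // Unified IO: render always opens the default device, so a
        // specific output |device_id| is not honored here.
        return new WASAPIUnifiedStream(manager, params, input_device_id);
      }
      return NULL;  // Non-unified stream creation elided.
    }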
diff --git a/media/audio/win/audio_manager_win.h b/media/audio/win/audio_manager_win.h
index 3a05ee6..c2ac8d7 100644
--- a/media/audio/win/audio_manager_win.h
+++ b/media/audio/win/audio_manager_win.h
@@ -39,7 +39,8 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id) OVERRIDE;
+ const std::string& device_id,
+ const std::string& input_device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
diff --git a/media/audio/win/audio_output_win_unittest.cc b/media/audio/win/audio_output_win_unittest.cc
index 72edad6..dcb5ca3 100644
--- a/media/audio/win/audio_output_win_unittest.cc
+++ b/media/audio/win/audio_output_win_unittest.cc
@@ -175,7 +175,7 @@ TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 256),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
oas->Close();
}
@@ -191,29 +191,29 @@ TEST(WinAudioTest, SanityOnMakeParams) {
AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16, 256),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, -100),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, 0),
- std::string()));
+ std::string(), std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16,
media::limits::kMaxSamplesPerPacket + 1),
- std::string()));
+ std::string(), std::string()));
}
// Test that it can be opened and closed.
@@ -227,7 +227,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 256),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
oas->Close();
@@ -244,7 +244,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 1024 * 1024 * 1024),
- std::string());
+ std::string(), std::string());
EXPECT_TRUE(NULL == oas);
if (oas)
oas->Close();
@@ -263,7 +263,7 @@ TEST(WinAudioTest, PCMWaveSlowSource) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
16000, 16, 256),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
TestSourceLaggy test_laggy(2, 90);
EXPECT_TRUE(oas->Open());
@@ -292,7 +292,7 @@ TEST(WinAudioTest, PCMWaveStreamPlaySlowLoop) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -323,7 +323,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone44Kss) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -352,7 +352,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate / 2, 16,
samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate/2);
@@ -392,7 +392,7 @@ TEST(WinAudioTest, PushSourceFile16KHz) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
kSampleRate, 16, kSamples100ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
@@ -429,7 +429,7 @@ TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -476,7 +476,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
CHANNEL_LAYOUT_MONO, sample_rate,
16, n * samples_10_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200, sample_rate);
@@ -510,7 +510,7 @@ TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
NiceMock<MockAudioSourceCallback> source;
@@ -664,7 +664,7 @@ TEST(WinAudioTest, SyncSocketBasic) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(params,
- std::string());
+ std::string(), std::string());
ASSERT_TRUE(NULL != oas);
ASSERT_TRUE(oas->Open());
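
The file added below keeps input and output clocks aligned with an exponentially smoothed FIFO-level error and a rate-compensation factor. A numeric sketch of that control loop, runnable on its own (kAlpha and kCorrectionTimeSeconds match the file; the FIFO levels and the target of 960 frames are made-up inputs for illustration):

    // Numeric sketch of the varispeed control loop in
    // WASAPIUnifiedStream::ProcessOutputAudio() below.
    #include <cstdio>

    int main() {
      const double kAlpha = 0.1;                  // Smoothing factor.
      const double kCorrectionTimeSeconds = 0.1;  // Convergence horizon.
      const int sample_rate = 48000;
      const int target_fifo_frames = 960;         // Ideal FIFO level.
      double average_delta = 0.0;

      const int fifo_levels[] = {1200, 1100, 1050, 980, 960};
      for (int frames : fifo_levels) {
        const int delta = frames - target_fifo_frames;  // Surplus or deficit.
        average_delta += kAlpha * (delta - average_delta);
        const double correction_time_frames =
            kCorrectionTimeSeconds * sample_rate;  // 4800 frames here.
        const double rate_compensation =
            (correction_time_frames + average_delta) / correction_time_frames;
        std::printf("fifo=%d compensation=%.5f\n", frames, rate_compensation);
      }
      return 0;
    }

A surplus in the FIFO yields a compensation factor slightly above 1.0 (resample faster, drain the FIFO); a deficit yields one slightly below.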
diff --git a/media/audio/win/audio_unified_win.cc b/media/audio/win/audio_unified_win.cc
new file mode 100644
index 0000000..901c8b8
--- /dev/null
+++ b/media/audio/win/audio_unified_win.cc
@@ -0,0 +1,984 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/win/audio_unified_win.h"
+
+#include <Functiondiscoverykeys_devpkey.h>
+
+#include "base/debug/trace_event.h"
+#ifndef NDEBUG
+#include "base/file_util.h"
+#include "base/path_service.h"
+#endif
+#include "base/time/time.h"
+#include "base/win/scoped_com_initializer.h"
+#include "media/audio/win/audio_manager_win.h"
+#include "media/audio/win/avrt_wrapper_win.h"
+#include "media/audio/win/core_audio_util_win.h"
+
+using base::win::ScopedComPtr;
+using base::win::ScopedCOMInitializer;
+using base::win::ScopedCoMem;
+
+// Smoothing factor in exponential smoothing filter where 0 < alpha < 1.
+// Larger values of alpha reduce the level of smoothing.
+// See http://en.wikipedia.org/wiki/Exponential_smoothing for details.
+static const double kAlpha = 0.1;
+
+// Compute a rate compensation which always attracts us back to a specified
+// target level over a period of |kCorrectionTimeSeconds|.
+static const double kCorrectionTimeSeconds = 0.1;
+
+#ifndef NDEBUG
+// Max number of columns in the output text file |kUnifiedAudioDebugFileName|.
+// See LogElementNames enumerator for details on what each column represents.
+static const size_t kMaxNumSampleTypes = 4;
+
+static const size_t kMaxNumParams = 2;
+
+// Max number of rows in the output file |kUnifiedAudioDebugFileName|.
+// Each row corresponds to one set of sample values for (approximately) the
+// same time instant (stored in the first column).
+static const size_t kMaxFileSamples = 10000;
+
+// Name of output debug file used for off-line analysis of measurements which
+// can be utilized for performance tuning of this class.
+static const char kUnifiedAudioDebugFileName[] = "unified_win_debug.txt";
+
+// Name of output debug file used for off-line analysis of measurements.
+// This file will contain a list of audio parameters.
+static const char kUnifiedAudioParamsFileName[] = "unified_win_params.txt";
+#endif
+
+// Use the acquired IAudioClock interface to derive a time stamp of the audio
+// sample which is currently playing through the speakers.
+static double SpeakerStreamPosInMilliseconds(IAudioClock* clock) {
+ UINT64 device_frequency = 0, position = 0;
+ if (FAILED(clock->GetFrequency(&device_frequency)) ||
+ FAILED(clock->GetPosition(&position, NULL))) {
+ return 0.0;
+ }
+ return base::Time::kMillisecondsPerSecond *
+ (static_cast<double>(position) / device_frequency);
+}
+
+// Get a time stamp in milliseconds given number of audio frames in |num_frames|
+// using the current sample rate |fs| as scale factor.
+// Example: |num_frames| = 960 and |fs| = 48000 => 20 [ms].
+static double CurrentStreamPosInMilliseconds(UINT64 num_frames, DWORD fs) {
+ return base::Time::kMillisecondsPerSecond *
+ (static_cast<double>(num_frames) / fs);
+}
+
+// Convert a timestamp in milliseconds to byte units given the audio format
+// in |format|.
+// Example: |ts_milliseconds| equals 10, sample rate is 48000 and frame size
+// is 4 bytes per audio frame => 480 * 4 = 1920 [bytes].
+static int MillisecondsToBytes(double ts_milliseconds,
+ const WAVEFORMATPCMEX& format) {
+ double seconds = ts_milliseconds / base::Time::kMillisecondsPerSecond;
+ return static_cast<int>(seconds * format.Format.nSamplesPerSec *
+ format.Format.nBlockAlign + 0.5);
+}
+
+// Convert frame count to milliseconds given the audio format in |format|.
+static double FrameCountToMilliseconds(int num_frames,
+ const WAVEFORMATPCMEX& format) {
+ return (base::Time::kMillisecondsPerSecond * num_frames) /
+ static_cast<double>(format.Format.nSamplesPerSec);
+}
+
+namespace media {
+
+WASAPIUnifiedStream::WASAPIUnifiedStream(AudioManagerWin* manager,
+ const AudioParameters& params,
+ const std::string& input_device_id)
+ : creating_thread_id_(base::PlatformThread::CurrentId()),
+ manager_(manager),
+ params_(params),
+ input_channels_(params.input_channels()),
+ output_channels_(params.channels()),
+ input_device_id_(input_device_id),
+ share_mode_(CoreAudioUtil::GetShareMode()),
+ opened_(false),
+ volume_(1.0),
+ output_buffer_size_frames_(0),
+ input_buffer_size_frames_(0),
+ endpoint_render_buffer_size_frames_(0),
+ endpoint_capture_buffer_size_frames_(0),
+ num_written_frames_(0),
+ total_delay_ms_(0.0),
+ total_delay_bytes_(0),
+ source_(NULL),
+ input_callback_received_(false),
+ io_sample_rate_ratio_(1),
+ target_fifo_frames_(0),
+ average_delta_(0),
+ fifo_rate_compensation_(1),
+ update_output_delay_(false),
+ capture_delay_ms_(0) {
+ TRACE_EVENT0("audio", "WASAPIUnifiedStream::WASAPIUnifiedStream");
+ VLOG(1) << "WASAPIUnifiedStream::WASAPIUnifiedStream()";
+ DCHECK(manager_);
+
+ VLOG(1) << "Input channels : " << input_channels_;
+ VLOG(1) << "Output channels: " << output_channels_;
+ VLOG(1) << "Sample rate : " << params_.sample_rate();
+ VLOG(1) << "Buffer size : " << params.frames_per_buffer();
+
+#ifndef NDEBUG
+ input_time_stamps_.reset(new int64[kMaxFileSamples]);
+ num_frames_in_fifo_.reset(new int[kMaxFileSamples]);
+ resampler_margin_.reset(new int[kMaxFileSamples]);
+ fifo_rate_comps_.reset(new double[kMaxFileSamples]);
+ num_elements_.reset(new int[kMaxNumSampleTypes]);
+ std::fill(num_elements_.get(), num_elements_.get() + kMaxNumSampleTypes, 0);
+ input_params_.reset(new int[kMaxNumParams]);
+ output_params_.reset(new int[kMaxNumParams]);
+#endif
+
+ DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
+ << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
+
+ // Load the Avrt DLL if not already loaded. Required to support MMCSS.
+ bool avrt_init = avrt::Initialize();
+ DCHECK(avrt_init) << "Failed to load the avrt.dll";
+
+ // All events are auto-reset events and non-signaled initially.
+
+ // Create the event which the audio engine will signal each time a buffer
+ // has been recorded.
+ capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
+
+  // Create the event which will be set in Stop() when streaming shall stop.
+ stop_streaming_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
+}
+
+WASAPIUnifiedStream::~WASAPIUnifiedStream() {
+ VLOG(1) << "WASAPIUnifiedStream::~WASAPIUnifiedStream()";
+#ifndef NDEBUG
+ base::FilePath data_file_name;
+ PathService::Get(base::DIR_EXE, &data_file_name);
+ data_file_name = data_file_name.AppendASCII(kUnifiedAudioDebugFileName);
+ data_file_ = base::OpenFile(data_file_name, "wt");
+ DVLOG(1) << ">> Output file " << data_file_name.value() << " is created.";
+
+ size_t n = 0;
+ size_t elements_to_write = *std::min_element(
+ num_elements_.get(), num_elements_.get() + kMaxNumSampleTypes);
+ while (n < elements_to_write) {
+ fprintf(data_file_, "%I64d %d %d %10.9f\n",
+ input_time_stamps_[n],
+ num_frames_in_fifo_[n],
+ resampler_margin_[n],
+ fifo_rate_comps_[n]);
+ ++n;
+ }
+ base::CloseFile(data_file_);
+
+ base::FilePath param_file_name;
+ PathService::Get(base::DIR_EXE, &param_file_name);
+ param_file_name = param_file_name.AppendASCII(kUnifiedAudioParamsFileName);
+ param_file_ = base::OpenFile(param_file_name, "wt");
+ DVLOG(1) << ">> Output file " << param_file_name.value() << " is created.";
+ fprintf(param_file_, "%d %d\n", input_params_[0], input_params_[1]);
+ fprintf(param_file_, "%d %d\n", output_params_[0], output_params_[1]);
+ base::CloseFile(param_file_);
+#endif
+}
+
+bool WASAPIUnifiedStream::Open() {
+ TRACE_EVENT0("audio", "WASAPIUnifiedStream::Open");
+ DVLOG(1) << "WASAPIUnifiedStream::Open()";
+ DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
+ if (opened_)
+ return true;
+
+ AudioParameters hw_output_params;
+ HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
+ eRender, eConsole, &hw_output_params);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to get preferred output audio parameters.";
+ return false;
+ }
+
+ AudioParameters hw_input_params;
+ if (input_device_id_ == AudioManagerBase::kDefaultDeviceId) {
+ // Query native parameters for the default capture device.
+ hr = CoreAudioUtil::GetPreferredAudioParameters(
+ eCapture, eConsole, &hw_input_params);
+ } else {
+ // Query native parameters for the capture device given by
+ // |input_device_id_|.
+ hr = CoreAudioUtil::GetPreferredAudioParameters(
+ input_device_id_, &hw_input_params);
+ }
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to get preferred input audio parameters.";
+ return false;
+ }
+
+ // It is currently only possible to open up the output audio device using
+ // the native number of channels.
+ if (output_channels_ != hw_output_params.channels()) {
+ LOG(ERROR) << "Audio device does not support requested output channels.";
+ return false;
+ }
+
+ // It is currently only possible to open up the input audio device using
+ // the native number of channels. If the client asks for a higher channel
+ // count, we will do channel upmixing in this class. The most typical
+ // example is that the client provides stereo but the hardware can only be
+ // opened in mono mode. We will do mono to stereo conversion in this case.
+ if (input_channels_ < hw_input_params.channels()) {
+ LOG(ERROR) << "Audio device does not support requested input channels.";
+ return false;
+ } else if (input_channels_ > hw_input_params.channels()) {
+ ChannelLayout input_layout =
+ GuessChannelLayout(hw_input_params.channels());
+ ChannelLayout output_layout = GuessChannelLayout(input_channels_);
+ channel_mixer_.reset(new ChannelMixer(input_layout, output_layout));
+ DVLOG(1) << "Remixing input channel layout from " << input_layout
+ << " to " << output_layout << "; from "
+ << hw_input_params.channels() << " channels to "
+ << input_channels_;
+ }
+
+ if (hw_output_params.sample_rate() != params_.sample_rate()) {
+ LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
+ << " must match the hardware sample-rate: "
+ << hw_output_params.sample_rate();
+ return false;
+ }
+
+ if (hw_output_params.frames_per_buffer() != params_.frames_per_buffer()) {
+ LOG(ERROR) << "Requested buffer size: " << params_.frames_per_buffer()
+ << " must match the hardware buffer size: "
+ << hw_output_params.frames_per_buffer();
+ return false;
+ }
+
+ // Set up WAVEFORMATPCMEX structures for input and output given the specified
+ // audio parameters.
+ SetIOFormats(hw_input_params, params_);
+
+ // Create the input and output busses.
+ input_bus_ = AudioBus::Create(
+ hw_input_params.channels(), input_buffer_size_frames_);
+ output_bus_ = AudioBus::Create(params_);
+
+ // One extra bus is needed for the input channel mixing case.
+ if (channel_mixer_) {
+ DCHECK_LT(hw_input_params.channels(), input_channels_);
+ // The size of the |channel_bus_| must be the same as the size of the
+    // output bus to ensure that the channel mixer can deal with both
+ // resampled and non-resampled data as input.
+ channel_bus_ = AudioBus::Create(
+ input_channels_, params_.frames_per_buffer());
+ }
+
+ // Check if FIFO and resampling is required to match the input rate to the
+ // output rate. If so, a special thread loop, optimized for this case, will
+ // be used. This mode is also called varispeed mode.
+ // Note that we can also use this mode when input and output rates are the
+ // same but native buffer sizes differ (can happen if two different audio
+ // devices are used). For this case, the resampler uses a target ratio of
+ // 1.0 but SetRatio is called to compensate for clock-drift. The FIFO is
+ // required to compensate for the difference in buffer sizes.
+  // TODO(henrika): we could perhaps improve performance for the second case
+  // by only using the FIFO and avoiding resampling. It is unclear how much
+  // that would gain, and we risk not compensating for clock drift.
+ if (hw_input_params.sample_rate() != params_.sample_rate() ||
+ hw_input_params.frames_per_buffer() != params_.frames_per_buffer()) {
+ DoVarispeedInitialization(hw_input_params, params_);
+ }
+
+ // Render side (event driven only in varispeed mode):
+
+ ScopedComPtr<IAudioClient> audio_output_client =
+ CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+ if (!audio_output_client)
+ return false;
+
+ if (!CoreAudioUtil::IsFormatSupported(audio_output_client,
+ share_mode_,
+ &output_format_)) {
+ return false;
+ }
+
+ if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+ // The |render_event_| will be NULL unless varispeed mode is utilized.
+ hr = CoreAudioUtil::SharedModeInitialize(
+ audio_output_client, &output_format_, render_event_.Get(),
+ &endpoint_render_buffer_size_frames_);
+ } else {
+ // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
+ }
+ if (FAILED(hr))
+ return false;
+
+ ScopedComPtr<IAudioRenderClient> audio_render_client =
+ CoreAudioUtil::CreateRenderClient(audio_output_client);
+ if (!audio_render_client)
+ return false;
+
+ // Capture side (always event driven but format depends on varispeed or not):
+
+ ScopedComPtr<IAudioClient> audio_input_client;
+ if (input_device_id_ == AudioManagerBase::kDefaultDeviceId) {
+ audio_input_client = CoreAudioUtil::CreateDefaultClient(eCapture, eConsole);
+ } else {
+ ScopedComPtr<IMMDevice> audio_input_device(
+ CoreAudioUtil::CreateDevice(input_device_id_));
+ audio_input_client = CoreAudioUtil::CreateClient(audio_input_device);
+ }
+ if (!audio_input_client)
+ return false;
+
+ if (!CoreAudioUtil::IsFormatSupported(audio_input_client,
+ share_mode_,
+ &input_format_)) {
+ return false;
+ }
+
+ if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+ // Include valid event handle for event-driven initialization.
+ // The input side is always event driven independent of if varispeed is
+ // used or not.
+ hr = CoreAudioUtil::SharedModeInitialize(
+ audio_input_client, &input_format_, capture_event_.Get(),
+ &endpoint_capture_buffer_size_frames_);
+ } else {
+ // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
+ }
+ if (FAILED(hr))
+ return false;
+
+ ScopedComPtr<IAudioCaptureClient> audio_capture_client =
+ CoreAudioUtil::CreateCaptureClient(audio_input_client);
+ if (!audio_capture_client)
+ return false;
+
+ // Varispeed mode requires additional preparations.
+ if (VarispeedMode())
+ ResetVarispeed();
+
+ // Store all valid COM interfaces.
+ audio_output_client_ = audio_output_client;
+ audio_render_client_ = audio_render_client;
+ audio_input_client_ = audio_input_client;
+ audio_capture_client_ = audio_capture_client;
+
+ opened_ = true;
+ return SUCCEEDED(hr);
+}
+
+void WASAPIUnifiedStream::Start(AudioSourceCallback* callback) {
+ TRACE_EVENT0("audio", "WASAPIUnifiedStream::Start");
+ DVLOG(1) << "WASAPIUnifiedStream::Start()";
+ DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
+ CHECK(callback);
+ CHECK(opened_);
+
+ if (audio_io_thread_) {
+ CHECK_EQ(callback, source_);
+ return;
+ }
+
+ source_ = callback;
+
+ if (VarispeedMode()) {
+ ResetVarispeed();
+ fifo_rate_compensation_ = 1.0;
+ average_delta_ = 0.0;
+ input_callback_received_ = false;
+ update_output_delay_ = false;
+ }
+
+ // Create and start the thread that will listen for capture events.
+ // We will also listen on render events on the same thread if varispeed
+ // mode is utilized.
+ audio_io_thread_.reset(
+ new base::DelegateSimpleThread(this, "wasapi_io_thread"));
+ audio_io_thread_->Start();
+ if (!audio_io_thread_->HasBeenStarted()) {
+ DLOG(ERROR) << "Failed to start WASAPI IO thread.";
+ return;
+ }
+
+ // Start input streaming data between the endpoint buffer and the audio
+ // engine.
+ HRESULT hr = audio_input_client_->Start();
+ if (FAILED(hr)) {
+ StopAndJoinThread(hr);
+ return;
+ }
+
+ // Ensure that the endpoint buffer is prepared with silence.
+ if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+ if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
+ audio_output_client_, audio_render_client_)) {
+ DLOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
+ return;
+ }
+ }
+ num_written_frames_ = endpoint_render_buffer_size_frames_;
+
+ // Start output streaming data between the endpoint buffer and the audio
+ // engine.
+ hr = audio_output_client_->Start();
+ if (FAILED(hr)) {
+ StopAndJoinThread(hr);
+ return;
+ }
+}
+
+void WASAPIUnifiedStream::Stop() {
+ TRACE_EVENT0("audio", "WASAPIUnifiedStream::Stop");
+ DVLOG(1) << "WASAPIUnifiedStream::Stop()";
+ DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
+ if (!audio_io_thread_)
+ return;
+
+ // Stop input audio streaming.
+ HRESULT hr = audio_input_client_->Stop();
+ if (FAILED(hr)) {
+ DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+ << "Failed to stop input streaming: " << std::hex << hr;
+ }
+
+ // Stop output audio streaming.
+ hr = audio_output_client_->Stop();
+ if (FAILED(hr)) {
+ DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+ << "Failed to stop output streaming: " << std::hex << hr;
+ }
+
+ // Wait until the thread completes and perform cleanup.
+ SetEvent(stop_streaming_event_.Get());
+ audio_io_thread_->Join();
+ audio_io_thread_.reset();
+
+ // Ensure that we don't quit the main thread loop immediately next
+ // time Start() is called.
+ ResetEvent(stop_streaming_event_.Get());
+
+ // Clear source callback, it'll be set again on the next Start() call.
+ source_ = NULL;
+
+ // Flush all pending data and reset the audio clock stream position to 0.
+ hr = audio_output_client_->Reset();
+ if (FAILED(hr)) {
+ DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+ << "Failed to reset output streaming: " << std::hex << hr;
+ }
+
+  hr = audio_input_client_->Reset();
+ if (FAILED(hr)) {
+ DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+ << "Failed to reset input streaming: " << std::hex << hr;
+ }
+
+ // Extra safety check to ensure that the buffers are cleared.
+ // If the buffers are not cleared correctly, the next call to Start()
+ // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
+  // TODO(henrika): this check is only needed for shared-mode streams.
+ UINT32 num_queued_frames = 0;
+ audio_output_client_->GetCurrentPadding(&num_queued_frames);
+ DCHECK_EQ(0u, num_queued_frames);
+}
+
+void WASAPIUnifiedStream::Close() {
+ TRACE_EVENT0("audio", "WASAPIUnifiedStream::Close");
+ DVLOG(1) << "WASAPIUnifiedStream::Close()";
+ DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
+
+  // It is valid to call Close() before calling Open() or Start().
+ // It is also valid to call Close() after Start() has been called.
+ Stop();
+
+ // Inform the audio manager that we have been closed. This will cause our
+ // destruction.
+ manager_->ReleaseOutputStream(this);
+}
+
+void WASAPIUnifiedStream::SetVolume(double volume) {
+ DVLOG(1) << "SetVolume(volume=" << volume << ")";
+ if (volume < 0 || volume > 1)
+ return;
+ volume_ = volume;
+}
+
+void WASAPIUnifiedStream::GetVolume(double* volume) {
+ DVLOG(1) << "GetVolume()";
+ *volume = static_cast<double>(volume_);
+}
+
+
+void WASAPIUnifiedStream::ProvideInput(int frame_delay, AudioBus* audio_bus) {
+ // TODO(henrika): utilize frame_delay?
+  // A non-zero frame delay means multiple callbacks were necessary to
+ // fulfill the requested number of frames.
+ if (frame_delay > 0)
+ DVLOG(3) << "frame_delay: " << frame_delay;
+
+#ifndef NDEBUG
+ resampler_margin_[num_elements_[RESAMPLER_MARGIN]] =
+ fifo_->frames() - audio_bus->frames();
+ num_elements_[RESAMPLER_MARGIN]++;
+#endif
+
+ if (fifo_->frames() < audio_bus->frames()) {
+ DVLOG(ERROR) << "Not enough data in the FIFO ("
+ << fifo_->frames() << " < " << audio_bus->frames() << ")";
+ audio_bus->Zero();
+ return;
+ }
+
+ fifo_->Consume(audio_bus, 0, audio_bus->frames());
+}
+
+void WASAPIUnifiedStream::SetIOFormats(const AudioParameters& input_params,
+ const AudioParameters& output_params) {
+ for (int n = 0; n < 2; ++n) {
+ const AudioParameters& params = (n == 0) ? input_params : output_params;
+ WAVEFORMATPCMEX* xformat = (n == 0) ? &input_format_ : &output_format_;
+ WAVEFORMATEX* format = &xformat->Format;
+
+ // Begin with the WAVEFORMATEX structure that specifies the basic format.
+ format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ format->nChannels = params.channels();
+ format->nSamplesPerSec = params.sample_rate();
+ format->wBitsPerSample = params.bits_per_sample();
+ format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
+ format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
+ format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+
+ // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
+ // Note that we always open up using the native channel layout.
+ (*xformat).Samples.wValidBitsPerSample = format->wBitsPerSample;
+ (*xformat).dwChannelMask =
+ CoreAudioUtil::GetChannelConfig(
+ std::string(), n == 0 ? eCapture : eRender);
+ (*xformat).SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ }
+
+ input_buffer_size_frames_ = input_params.frames_per_buffer();
+ output_buffer_size_frames_ = output_params.frames_per_buffer();
+ VLOG(1) << "#audio frames per input buffer : " << input_buffer_size_frames_;
+ VLOG(1) << "#audio frames per output buffer: " << output_buffer_size_frames_;
+
+#ifndef NDEBUG
+ input_params_[0] = input_format_.Format.nSamplesPerSec;
+ input_params_[1] = input_buffer_size_frames_;
+ output_params_[0] = output_format_.Format.nSamplesPerSec;
+ output_params_[1] = output_buffer_size_frames_;
+#endif
+}
+
+void WASAPIUnifiedStream::DoVarispeedInitialization(
+ const AudioParameters& input_params, const AudioParameters& output_params) {
+ DVLOG(1) << "WASAPIUnifiedStream::DoVarispeedInitialization()";
+
+ // A FIFO is required in this mode for input to output buffering.
+ // Note that it will add some latency.
+ fifo_.reset(new AudioFifo(input_params.channels(), kFifoSize));
+ VLOG(1) << "Using FIFO of size " << fifo_->max_frames()
+ << " (#channels=" << input_params.channels() << ")";
+
+ // Create the multi channel resampler using the initial sample rate ratio.
+ // We will call MultiChannelResampler::SetRatio() during runtime to
+ // allow arbitrary combinations of input and output devices running off
+ // different clocks and using different drivers, with potentially
+ // differing sample-rates. Note that the requested block size is given by
+ // the native input buffer size |input_buffer_size_frames_|.
+ io_sample_rate_ratio_ = input_params.sample_rate() /
+ static_cast<double>(output_params.sample_rate());
+ DVLOG(2) << "io_sample_rate_ratio: " << io_sample_rate_ratio_;
+ resampler_.reset(new MultiChannelResampler(
+ input_params.channels(), io_sample_rate_ratio_, input_buffer_size_frames_,
+ base::Bind(&WASAPIUnifiedStream::ProvideInput, base::Unretained(this))));
+ VLOG(1) << "Resampling from " << input_params.sample_rate() << " to "
+ << output_params.sample_rate();
+
+ // The optimal number of frames we'd like to keep in the FIFO at all times.
+ // The actual size will vary but the goal is to ensure that the average size
+ // is given by this value.
+ target_fifo_frames_ = kTargetFifoSafetyFactor * input_buffer_size_frames_;
+ VLOG(1) << "Target FIFO size: " << target_fifo_frames_;
+
+ // Create the event which the audio engine will signal each time it
+ // wants an audio buffer to render.
+ render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
+
+ // Allocate memory for temporary audio bus used to store resampled input
+ // audio.
+ resampled_bus_ = AudioBus::Create(
+ input_params.channels(), output_buffer_size_frames_);
+
+ // Buffer initial silence corresponding to target I/O buffering.
+ ResetVarispeed();
+}
+
+void WASAPIUnifiedStream::ResetVarispeed() {
+ DCHECK(VarispeedMode());
+
+ // Buffer initial silence corresponding to target I/O buffering.
+ fifo_->Clear();
+ scoped_ptr<AudioBus> silence =
+ AudioBus::Create(input_format_.Format.nChannels,
+ target_fifo_frames_);
+ silence->Zero();
+ fifo_->Push(silence.get());
+ resampler_->Flush();
+}
+
+void WASAPIUnifiedStream::Run() {
+ ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
+
+ // Increase the thread priority.
+ audio_io_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
+
+ // Enable MMCSS to ensure that this thread receives prioritized access to
+ // CPU resources.
+ // TODO(henrika): investigate if it is possible to include these additional
+ // settings in SetThreadPriority() as well.
+ DWORD task_index = 0;
+ HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
+ &task_index);
+ bool mmcss_is_ok =
+ (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
+ if (!mmcss_is_ok) {
+ // Failed to enable MMCSS on this thread. It is not fatal but can lead
+ // to reduced QoS at high load.
+ DWORD err = GetLastError();
+ LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
+ }
+
+ // The IAudioClock interface enables us to monitor a stream's data
+ // rate and the current position in the stream. Allocate it before we
+ // start spinning.
+ ScopedComPtr<IAudioClock> audio_output_clock;
+ HRESULT hr = audio_output_client_->GetService(
+ __uuidof(IAudioClock), audio_output_clock.ReceiveVoid());
+ LOG_IF(WARNING, FAILED(hr)) << "Failed to create IAudioClock: "
+ << std::hex << hr;
+
+ bool streaming = true;
+ bool error = false;
+
+ HANDLE wait_array[3];
+ size_t num_handles = 0;
+ wait_array[num_handles++] = stop_streaming_event_;
+ wait_array[num_handles++] = capture_event_;
+ if (render_event_) {
+ // One extra event handle is needed in varispeed mode.
+ wait_array[num_handles++] = render_event_;
+ }
+
+ // Keep streaming audio until stop event is signaled.
+ // Capture events are always used but render events are only active in
+ // varispeed mode.
+ while (streaming && !error) {
+    // Wait for a close-down, capture, or (varispeed only) render event.
+ DWORD wait_result = WaitForMultipleObjects(num_handles,
+ wait_array,
+ FALSE,
+ INFINITE);
+ switch (wait_result) {
+ case WAIT_OBJECT_0 + 0:
+ // |stop_streaming_event_| has been set.
+ streaming = false;
+ break;
+ case WAIT_OBJECT_0 + 1:
+        // |capture_event_| has been set.
+ if (VarispeedMode()) {
+ ProcessInputAudio();
+ } else {
+ ProcessInputAudio();
+ ProcessOutputAudio(audio_output_clock);
+ }
+ break;
+ case WAIT_OBJECT_0 + 2:
+ DCHECK(VarispeedMode());
+        // |render_event_| has been set.
+ ProcessOutputAudio(audio_output_clock);
+ break;
+ default:
+ error = true;
+ break;
+ }
+ }
+
+ if (streaming && error) {
+ // Stop audio streaming since something has gone wrong in our main thread
+    // loop. Note that we are still in a "started" state, hence a Stop() call
+ // is required to join the thread properly.
+ audio_input_client_->Stop();
+ audio_output_client_->Stop();
+ PLOG(ERROR) << "WASAPI streaming failed.";
+ }
+
+ // Disable MMCSS.
+ if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
+ PLOG(WARNING) << "Failed to disable MMCSS";
+ }
+}
+
+void WASAPIUnifiedStream::ProcessInputAudio() {
+ TRACE_EVENT0("audio", "WASAPIUnifiedStream::ProcessInputAudio");
+
+ BYTE* data_ptr = NULL;
+ UINT32 num_captured_frames = 0;
+ DWORD flags = 0;
+ UINT64 device_position = 0;
+ UINT64 capture_time_stamp = 0;
+
+ const int bytes_per_sample = input_format_.Format.wBitsPerSample >> 3;
+
+ base::TimeTicks now_tick = base::TimeTicks::HighResNow();
+
+#ifndef NDEBUG
+ if (VarispeedMode()) {
+ input_time_stamps_[num_elements_[INPUT_TIME_STAMP]] =
+ now_tick.ToInternalValue();
+ num_elements_[INPUT_TIME_STAMP]++;
+ }
+#endif
+
+ // Retrieve the amount of data in the capture endpoint buffer.
+ // |endpoint_capture_time_stamp| is the value of the performance
+ // counter at the time that the audio endpoint device recorded
+ // the device position of the first audio frame in the data packet.
+ HRESULT hr = audio_capture_client_->GetBuffer(&data_ptr,
+ &num_captured_frames,
+ &flags,
+ &device_position,
+ &capture_time_stamp);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to get data from the capture buffer";
+ return;
+ }
+
+ if (hr == AUDCLNT_S_BUFFER_EMPTY) {
+    // The return code is a success code but a new packet is *not* available
+ // and none of the output parameters in the GetBuffer() call contains valid
+ // values. Best we can do is to deliver silence and avoid setting
+ // |input_callback_received_| since this only seems to happen for the
+ // initial event(s) on some devices.
+ input_bus_->Zero();
+ } else {
+ // Valid data has been recorded and it is now OK to set the flag which
+ // informs the render side that capturing has started.
+ input_callback_received_ = true;
+ }
+
+ if (num_captured_frames != 0) {
+ if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+ // Clear out the capture buffer since silence is reported.
+ input_bus_->Zero();
+ } else {
+ // Store captured data in an audio bus after de-interleaving
+ // the data to match the audio bus structure.
+ input_bus_->FromInterleaved(
+ data_ptr, num_captured_frames, bytes_per_sample);
+ }
+ }
+
+ hr = audio_capture_client_->ReleaseBuffer(num_captured_frames);
+ DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer";
+
+ // Buffer input into FIFO if varispeed mode is used. The render event
+ // will drive resampling of this data to match the output side.
+ if (VarispeedMode()) {
+ int available_frames = fifo_->max_frames() - fifo_->frames();
+ if (input_bus_->frames() <= available_frames) {
+ fifo_->Push(input_bus_.get());
+ }
+#ifndef NDEBUG
+ num_frames_in_fifo_[num_elements_[NUM_FRAMES_IN_FIFO]] =
+ fifo_->frames();
+ num_elements_[NUM_FRAMES_IN_FIFO]++;
+#endif
+ }
+
+ // Save resource by not asking for new delay estimates each time.
+ // These estimates are fairly stable and it is perfectly safe to only
+ // sample at a rate of ~1Hz.
+ // TODO(henrika): we might have to increase the update rate in varispeed
+ // mode since the delay variations are higher in this mode.
+ if ((now_tick - last_delay_sample_time_).InMilliseconds() >
+ kTimeDiffInMillisecondsBetweenDelayMeasurements &&
+ input_callback_received_) {
+ // Calculate the estimated capture delay, i.e., the latency between
+    // the recording time and the time when we are notified about
+ // the recorded data. Note that the capture time stamp is given in
+ // 100-nanosecond (0.1 microseconds) units.
+ base::TimeDelta diff =
+ now_tick - base::TimeTicks::FromInternalValue(0.1 * capture_time_stamp);
+ capture_delay_ms_ = diff.InMillisecondsF();
+
+ last_delay_sample_time_ = now_tick;
+ update_output_delay_ = true;
+ }
+}
+
+void WASAPIUnifiedStream::ProcessOutputAudio(IAudioClock* audio_output_clock) {
+ TRACE_EVENT0("audio", "WASAPIUnifiedStream::ProcessOutputAudio");
+
+ if (!input_callback_received_) {
+ if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+ if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
+ audio_output_client_, audio_render_client_))
+ DLOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
+ }
+ return;
+ }
+
+ // Rate adjusted resampling is required in varispeed mode. It means that
+ // recorded audio samples will be read from the FIFO, resampled to match the
+ // output sample-rate and then stored in |resampled_bus_|.
+ if (VarispeedMode()) {
+ // Calculate a varispeed rate scalar factor to compensate for drift between
+ // input and output. We use the actual number of frames still in the FIFO
+ // compared with the ideal value of |target_fifo_frames_|.
+ int delta = fifo_->frames() - target_fifo_frames_;
+
+ // Average |delta| because it can jitter back/forth quite frequently
+ // by +/- the hardware buffer-size *if* the input and output callbacks are
+ // happening at almost exactly the same time. Also, if the input and output
+ // sample-rates are different then |delta| will jitter quite a bit due to
+ // the rate conversion happening in the varispeed, plus the jittering of
+ // the callbacks. The average value is what's important here.
+ // We use an exponential smoothing filter to reduce the variations.
+ average_delta_ += kAlpha * (delta - average_delta_);
+
+ // Compute a rate compensation which always attracts us back to the
+ // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
+ double correction_time_frames =
+ kCorrectionTimeSeconds * output_format_.Format.nSamplesPerSec;
+ fifo_rate_compensation_ =
+ (correction_time_frames + average_delta_) / correction_time_frames;
+
+#ifndef NDEBUG
+ fifo_rate_comps_[num_elements_[RATE_COMPENSATION]] =
+ fifo_rate_compensation_;
+ num_elements_[RATE_COMPENSATION]++;
+#endif
+
+ // Adjust for FIFO drift.
+ const double new_ratio = io_sample_rate_ratio_ * fifo_rate_compensation_;
+ resampler_->SetRatio(new_ratio);
+ // Get resampled input audio from FIFO where the size is given by the
+ // output side.
+ resampler_->Resample(resampled_bus_->frames(), resampled_bus_.get());
+ }
+
+ // Derive a new total delay estimate if the capture side has set the
+ // |update_output_delay_| flag.
+ if (update_output_delay_) {
+ // Calculate the estimated render delay, i.e., the time difference
+ // between the time when data is added to the endpoint buffer and
+ // when the data is played out on the actual speaker.
+ const double stream_pos = CurrentStreamPosInMilliseconds(
+ num_written_frames_ + output_buffer_size_frames_,
+ output_format_.Format.nSamplesPerSec);
+ const double speaker_pos =
+ SpeakerStreamPosInMilliseconds(audio_output_clock);
+ const double render_delay_ms = stream_pos - speaker_pos;
+ const double fifo_delay_ms = VarispeedMode() ?
+ FrameCountToMilliseconds(target_fifo_frames_, input_format_) : 0;
+
+ // Derive the total delay, i.e., the sum of the input and output
+ // delays. Also convert the value into byte units. An extra FIFO delay
+ // is added for varispeed usage cases.
+ total_delay_ms_ = VarispeedMode() ?
+ capture_delay_ms_ + render_delay_ms + fifo_delay_ms :
+ capture_delay_ms_ + render_delay_ms;
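+    // Example: a capture delay of 10 ms, a render delay of 30 ms and (in
+    // varispeed mode) a FIFO delay of 20 ms give a total delay of 60 ms,
+    // which is converted to bytes below and reported to the client.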
+ DVLOG(2) << "total_delay_ms : " << total_delay_ms_;
+ DVLOG(3) << " capture_delay_ms: " << capture_delay_ms_;
+ DVLOG(3) << " render_delay_ms : " << render_delay_ms;
+ DVLOG(3) << " fifo_delay_ms : " << fifo_delay_ms;
+ total_delay_bytes_ = MillisecondsToBytes(total_delay_ms_, output_format_);
+
+ // Wait for new signal from the capture side.
+ update_output_delay_ = false;
+ }
+
+  // Select source depending on whether varispeed is utilized.
+ // Also, the source might be the output of a channel mixer if channel mixing
+ // is required to match the native input channels to the number of input
+ // channels used by the client (given by |input_channels_| in this case).
+ AudioBus* input_bus = VarispeedMode() ?
+ resampled_bus_.get() : input_bus_.get();
+ if (channel_mixer_) {
+ DCHECK_EQ(input_bus->frames(), channel_bus_->frames());
+ // Most common case is 1->2 channel upmixing.
+ channel_mixer_->Transform(input_bus, channel_bus_.get());
+ // Use the output from the channel mixer as new input bus.
+ input_bus = channel_bus_.get();
+ }
+
+ // Prepare for rendering by calling OnMoreIOData().
+ int frames_filled = source_->OnMoreIOData(
+ input_bus,
+ output_bus_.get(),
+ AudioBuffersState(0, total_delay_bytes_));
+ DCHECK_EQ(frames_filled, output_bus_->frames());
+
+ // Keep track of number of rendered frames since we need it for
+ // our delay calculations.
+ num_written_frames_ += frames_filled;
+
+  // Derive the amount of available space in the endpoint buffer.
+  // Avoid a render attempt if there is no room for a full output packet.
+ UINT32 num_queued_frames = 0;
+ audio_output_client_->GetCurrentPadding(&num_queued_frames);
+ if (endpoint_render_buffer_size_frames_ - num_queued_frames <
+ output_buffer_size_frames_)
+ return;
+
+ // Grab all available space in the rendering endpoint buffer
+ // into which the client can write a data packet.
+ uint8* audio_data = NULL;
+ HRESULT hr = audio_render_client_->GetBuffer(output_buffer_size_frames_,
+ &audio_data);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to access render buffer";
+ return;
+ }
+
+ const int bytes_per_sample = output_format_.Format.wBitsPerSample >> 3;
+
+ // Convert the audio bus content to interleaved integer data using
+ // |audio_data| as destination.
+ output_bus_->Scale(volume_);
+ output_bus_->ToInterleaved(
+ output_buffer_size_frames_, bytes_per_sample, audio_data);
+
+ // Release the buffer space acquired in the GetBuffer() call.
+  hr = audio_render_client_->ReleaseBuffer(output_buffer_size_frames_, 0);
+  DLOG_IF(ERROR, FAILED(hr)) << "Failed to release render buffer";
+}
+
+void WASAPIUnifiedStream::HandleError(HRESULT err) {
+ CHECK((started() && GetCurrentThreadId() == audio_io_thread_->tid()) ||
+ (!started() && GetCurrentThreadId() == creating_thread_id_));
+ NOTREACHED() << "Error code: " << std::hex << err;
+ if (source_)
+ source_->OnError(this);
+}
+
+void WASAPIUnifiedStream::StopAndJoinThread(HRESULT err) {
+ CHECK(GetCurrentThreadId() == creating_thread_id_);
+ DCHECK(audio_io_thread_.get());
+ SetEvent(stop_streaming_event_.Get());
+ audio_io_thread_->Join();
+ audio_io_thread_.reset();
+ HandleError(err);
+}
+
+} // namespace media
diff --git a/media/audio/win/audio_unified_win.h b/media/audio/win/audio_unified_win.h
new file mode 100644
index 0000000..76c5329
--- /dev/null
+++ b/media/audio/win/audio_unified_win.h
@@ -0,0 +1,352 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
+#define MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
+
+#include <Audioclient.h>
+#include <MMDeviceAPI.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_comptr.h"
+#include "base/win/scoped_handle.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_fifo.h"
+#include "media/base/channel_mixer.h"
+#include "media/base/media_export.h"
+#include "media/base/multi_channel_resampler.h"
+
+namespace media {
+
+class AudioManagerWin;
+
+// Implementation of AudioOutputStream for Windows using the Core Audio API
+// where both capturing and rendering take place on the same thread to enable
+// audio I/O. This class allows arbitrary combinations of input and output
+// devices running off different clocks and using different drivers, with
+// potentially differing sample-rates.
+//
+// It is required to first acquire the native sample rate of the selected
+// output device and then use the same rate when creating this object.
+// The inner operation depends on the input sample rate which is determined
+// during construction. Three different main modes are supported:
+//
+// 1) input rate == output rate => input side drives output side directly.
+// 2) input rate != output rate => both sides are driven independently by
+// events and a FIFO plus a resampling unit is used to compensate for
+// differences in sample rates between the two sides.
+// 3) input rate == output rate but native buffer sizes are not identical =>
+// same inner functionality as in (2) to compensate for the differences
+// in buffer sizes and also compensate for any potential clock drift
+// between the two devices.
+//
+// Mode detection is done at construction and using mode (1) will lead to
+// best performance (lower delay and no "varispeed distortion"), i.e., it is
+// recommended to use same sample rates for input and output. Mode (2) uses a
+// resampler which supports rate adjustments to fine tune for things like
+// clock drift and differences in sample rates between different devices.
+// Mode (2) - which uses a FIFO and an adjustable multi-channel resampler -
+// is also called the varispeed mode and it is used for case (3) as well,
+// mainly to compensate for the difference in buffer sizes.
+// Mode (3) can happen if two different audio devices are used.
+// As an example: some devices need a buffer size of 441 @ 44.1kHz and others
+// 448 @ 44.1kHz. This is a rare case and will only happen for sample rates
+// which are multiples of 11025 Hz (11025, 22050, 44100, 88200, etc.).
+//
+// Implementation notes:
+//
+// - Open() can fail if the input and output parameters do not fulfill
+// certain conditions. See source for Open() for more details.
+// - Channel mixing will be performed if the client asks for a larger
+// number of channels than the native audio layer provides.
+// Example: client wants stereo but audio layer provides mono. In this case
+// upmixing from mono to stereo (1->2) will be done.
+//
+// TODO(henrika):
+//
+// - Add support for exclusive mode.
+// - Add support for KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, i.e., 32-bit float
+// as internal sample-value representation.
+// - Perform fine-tuning for non-matching sample rates to reduce latency.
+//
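+// A minimal usage sketch (assuming a valid AudioManagerWin |manager|, unified
+// AudioParameters |params| and an existing input device ID; error handling
+// omitted):
+//
+//   WASAPIUnifiedStream* stream =
+//       new WASAPIUnifiedStream(manager, params, input_device_id);
+//   if (stream->Open()) {
+//     stream->Start(source);  // |source| implements AudioSourceCallback.
+//     // ... capture/render callbacks run on the internal audio thread ...
+//     stream->Stop();
+//   }
+//   stream->Close();  // Typically triggers destruction, see dtor comment.
+//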
+class MEDIA_EXPORT WASAPIUnifiedStream
+ : public AudioOutputStream,
+ public base::DelegateSimpleThread::Delegate {
+ public:
+  // The ctor takes all the usual parameters, plus |manager|, which is the
+  // audio manager creating this object.
+ WASAPIUnifiedStream(AudioManagerWin* manager,
+ const AudioParameters& params,
+ const std::string& input_device_id);
+
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioOutputStream::Close().
+ virtual ~WASAPIUnifiedStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ bool started() const {
+ return audio_io_thread_.get() != NULL;
+ }
+
+ // Returns true if input sample rate differs from the output sample rate.
+  // A FIFO and an adjustable multi-channel resampler are utilized in this mode.
+ bool VarispeedMode() const { return (fifo_ && resampler_); }
+
+ private:
+ enum {
+ // Time in milliseconds between two successive delay measurements.
+ // We save resources by not updating the delay estimates for each capture
+ // event (typically 100Hz rate).
+ kTimeDiffInMillisecondsBetweenDelayMeasurements = 1000,
+
+ // Max possible FIFO size.
+ kFifoSize = 16384,
+
+ // This value was determined empirically for minimum latency while still
+ // guarding against FIFO under-runs. The actual target size will be equal
+ // to kTargetFifoSafetyFactor * (native input buffer size).
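+    // Example: a 10 ms native input buffer at 48 kHz is 480 frames, so the
+    // target FIFO level becomes 2 * 480 = 960 frames.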
+ // TODO(henrika): tune this value for lowest possible latency for all
+ // possible sample rate combinations.
+ kTargetFifoSafetyFactor = 2
+ };
+
+  // Additional initialization required when input and output sample rates
+  // differ. Allocates resources for |fifo_|, |resampler_|, |render_event_|,
+ // and the |capture_bus_| and configures the |input_format_| structure
+ // given the provided input and output audio parameters.
+ void DoVarispeedInitialization(const AudioParameters& input_params,
+ const AudioParameters& output_params);
+
+ // Clears varispeed related components such as the FIFO and the resampler.
+ void ResetVarispeed();
+
+ // Builds WAVEFORMATEX structures for input and output based on input and
+ // output audio parameters.
+ void SetIOFormats(const AudioParameters& input_params,
+ const AudioParameters& output_params);
+
+ // DelegateSimpleThread::Delegate implementation.
+ virtual void Run() OVERRIDE;
+
+ // MultiChannelResampler::MultiChannelAudioSourceProvider implementation.
+ // Callback for providing more data into the resampler.
+ // Only used in varispeed mode, i.e., when input rate != output rate.
+ virtual void ProvideInput(int frame_delay, AudioBus* audio_bus);
+
+ // Issues the OnError() callback to the |source_|.
+ void HandleError(HRESULT err);
+
+ // Stops and joins the audio thread in case of an error.
+ void StopAndJoinThread(HRESULT err);
+
+  // Converts a unique endpoint ID to a user-friendly device name.
+ std::string GetDeviceName(LPCWSTR device_id) const;
+
+ // Called on the audio IO thread for each capture event.
+ // Buffers captured audio into a FIFO if varispeed is used or into an audio
+ // bus if input and output sample rates are identical.
+ void ProcessInputAudio();
+
+ // Called on the audio IO thread for each render event when varispeed is
+ // active or for each capture event when varispeed is not used.
+ // In varispeed mode, it triggers a resampling callback, which reads from the
+ // FIFO, and calls AudioSourceCallback::OnMoreIOData using the resampled
+ // input signal and at the same time asks for data to play out.
+ // If input and output rates are the same - instead of reading from the FIFO
+  // and doing resampling - we read directly from the audio bus used to store
+ // captured data in ProcessInputAudio.
+ void ProcessOutputAudio(IAudioClock* audio_output_clock);
+
+ // Contains the thread ID of the creating thread.
+ base::PlatformThreadId creating_thread_id_;
+
+  // Our creator, the audio manager, needs to be notified when we close.
+ AudioManagerWin* manager_;
+
+ // Contains the audio parameter structure provided at construction.
+ AudioParameters params_;
+ // For convenience, same as in params_.
+ int input_channels_;
+ int output_channels_;
+
+ // Unique ID of the input device to be opened.
+ const std::string input_device_id_;
+
+ // The sharing mode for the streams.
+ // Valid values are AUDCLNT_SHAREMODE_SHARED and AUDCLNT_SHAREMODE_EXCLUSIVE
+ // where AUDCLNT_SHAREMODE_SHARED is the default.
+ AUDCLNT_SHAREMODE share_mode_;
+
+  // Rendering and capturing are driven by this thread (no message loop).
+ // All OnMoreIOData() callbacks will be called from this thread.
+ scoped_ptr<base::DelegateSimpleThread> audio_io_thread_;
+
+ // Contains the desired audio output format which is set up at construction.
+ // It is required to first acquire the native sample rate of the selected
+ // output device and then use the same rate when creating this object.
+ WAVEFORMATPCMEX output_format_;
+
+ // Contains the native audio input format which is set up at construction
+ // if varispeed mode is utilized.
+ WAVEFORMATPCMEX input_format_;
+
+ // True when successfully opened.
+ bool opened_;
+
+ // Volume level from 0 to 1 used for output scaling.
+ double volume_;
+
+ // Size in audio frames of each audio packet where an audio packet
+ // is defined as the block of data which the destination is expected to
+ // receive in each OnMoreIOData() callback.
+ size_t output_buffer_size_frames_;
+
+ // Size in audio frames of each audio packet where an audio packet
+ // is defined as the block of data which the source is expected to
+ // deliver in each OnMoreIOData() callback.
+ size_t input_buffer_size_frames_;
+
+ // Length of the audio endpoint buffer.
+ uint32 endpoint_render_buffer_size_frames_;
+ uint32 endpoint_capture_buffer_size_frames_;
+
+ // Counts the number of audio frames written to the endpoint buffer.
+ uint64 num_written_frames_;
+
+ // Time stamp for last delay measurement.
+ base::TimeTicks last_delay_sample_time_;
+
+ // Contains the total (sum of render and capture) delay in milliseconds.
+ double total_delay_ms_;
+
+ // Contains the total (sum of render and capture and possibly FIFO) delay
+ // in bytes. The update frequency is set by a constant called
+ // |kTimeDiffInMillisecondsBetweenDelayMeasurements|.
+ int total_delay_bytes_;
+
+ // Pointer to the client that will deliver audio samples to be played out.
+ AudioSourceCallback* source_;
+
+  // IMMDevice interfaces which represent audio endpoint devices.
+ base::win::ScopedComPtr<IMMDevice> endpoint_render_device_;
+ base::win::ScopedComPtr<IMMDevice> endpoint_capture_device_;
+
+  // IAudioClient interfaces which enable a client to create and initialize
+ // an audio stream between an audio application and the audio engine.
+ base::win::ScopedComPtr<IAudioClient> audio_output_client_;
+ base::win::ScopedComPtr<IAudioClient> audio_input_client_;
+
+  // The IAudioRenderClient interface enables a client to write output
+ // data to a rendering endpoint buffer.
+ base::win::ScopedComPtr<IAudioRenderClient> audio_render_client_;
+
+  // The IAudioCaptureClient interface enables a client to read input
+ // data from a capturing endpoint buffer.
+ base::win::ScopedComPtr<IAudioCaptureClient> audio_capture_client_;
+
+ // The audio engine will signal this event each time a buffer has been
+ // recorded.
+ base::win::ScopedHandle capture_event_;
+
+ // The audio engine will signal this event each time it needs a new
+ // audio buffer to play out.
+ // Only utilized in varispeed mode.
+ base::win::ScopedHandle render_event_;
+
+ // This event will be signaled when streaming shall stop.
+ base::win::ScopedHandle stop_streaming_event_;
+
+ // Container for retrieving data from AudioSourceCallback::OnMoreIOData().
+ scoped_ptr<AudioBus> output_bus_;
+
+ // Container for sending data to AudioSourceCallback::OnMoreIOData().
+ scoped_ptr<AudioBus> input_bus_;
+
+ // Container for storing output from the channel mixer.
+ scoped_ptr<AudioBus> channel_bus_;
+
+ // All members below are only allocated, or used, in varispeed mode:
+
+ // Temporary storage of resampled input audio data.
+ scoped_ptr<AudioBus> resampled_bus_;
+
+  // Set to true the first time a capture event has been received in varispeed
+ // mode.
+ bool input_callback_received_;
+
+  // MultiChannelResampler is a multi-channel wrapper for SincResampler,
+  // allowing high-quality sample rate conversion of multiple channels at once.
+ scoped_ptr<MultiChannelResampler> resampler_;
+
+ // Resampler I/O ratio.
+ double io_sample_rate_ratio_;
+
+ // Used for input to output buffering.
+ scoped_ptr<AudioFifo> fifo_;
+
+ // The channel mixer is only created and utilized if number of input channels
+  // is larger than the native number of input channels (e.g., client wants
+ // stereo but the audio device only supports mono).
+ scoped_ptr<ChannelMixer> channel_mixer_;
+
+ // The optimal number of frames we'd like to keep in the FIFO at all times.
+ int target_fifo_frames_;
+
+ // A running average of the measured delta between actual number of frames
+ // in the FIFO versus |target_fifo_frames_|.
+ double average_delta_;
+
+ // A varispeed rate scalar which is calculated based on FIFO drift.
+ double fifo_rate_compensation_;
+
+ // Set to true when input side signals output side that a new delay
+ // estimate is needed.
+ bool update_output_delay_;
+
+ // Capture side stores its delay estimate so the sum can be derived in
+ // the render side.
+ double capture_delay_ms_;
+
+ // TODO(henrika): possibly remove these members once the performance is
+ // properly tuned. Only used for off-line debugging.
+#ifndef NDEBUG
+ enum LogElementNames {
+ INPUT_TIME_STAMP,
+ NUM_FRAMES_IN_FIFO,
+ RESAMPLER_MARGIN,
+ RATE_COMPENSATION
+ };
+
+ scoped_ptr<int64[]> input_time_stamps_;
+ scoped_ptr<int[]> num_frames_in_fifo_;
+ scoped_ptr<int[]> resampler_margin_;
+ scoped_ptr<double[]> fifo_rate_comps_;
+ scoped_ptr<int[]> num_elements_;
+ scoped_ptr<int[]> input_params_;
+ scoped_ptr<int[]> output_params_;
+
+ FILE* data_file_;
+ FILE* param_file_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(WASAPIUnifiedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
diff --git a/media/audio/win/audio_unified_win_unittest.cc b/media/audio/win/audio_unified_win_unittest.cc
new file mode 100644
index 0000000..fadec61
--- /dev/null
+++ b/media/audio/win/audio_unified_win_unittest.cc
@@ -0,0 +1,356 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/command_line.h"
+#include "base/file_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/path_service.h"
+#include "base/test/test_timeouts.h"
+#include "base/time/time.h"
+#include "base/win/scoped_com_initializer.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/mock_audio_source_callback.h"
+#include "media/audio/win/audio_unified_win.h"
+#include "media/audio/win/core_audio_util_win.h"
+#include "media/base/channel_mixer.h"
+#include "media/base/media_switches.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Between;
+using ::testing::DoAll;
+using ::testing::NotNull;
+using ::testing::Return;
+using base::win::ScopedCOMInitializer;
+
+namespace media {
+
+static const size_t kMaxDeltaSamples = 1000;
+static const char kDeltaTimeMsFileName[] = "unified_delta_times_ms.txt";
+
+// Verify that the delay estimate in the OnMoreIOData() callback is larger
+// than an expected minimum value.
+MATCHER_P(DelayGreaterThan, value, "") {
+ return (arg.hardware_delay_bytes > value.hardware_delay_bytes);
+}
+
+// Used to terminate a loop from a different thread than the loop belongs to.
+// |loop| should be a MessageLoopProxy.
+ACTION_P(QuitLoop, loop) {
+ loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+}
+
+// AudioOutputStream::AudioSourceCallback implementation which enables audio
+// play-through. It also creates a text file that contains times between two
+// successive callbacks. Units are in milliseconds. This file can be used for
+// off-line analysis of the callback sequence.
+class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+  UnifiedSourceCallback()
+ : previous_call_time_(base::TimeTicks::Now()),
+ text_file_(NULL),
+ elements_to_write_(0) {
+ delta_times_.reset(new int[kMaxDeltaSamples]);
+ }
+
+ virtual ~UnifiedSourceCallback() {
+ base::FilePath file_name;
+ EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
+ file_name = file_name.AppendASCII(kDeltaTimeMsFileName);
+
+ EXPECT_TRUE(!text_file_);
+ text_file_ = base::OpenFile(file_name, "wt");
+ DLOG_IF(ERROR, !text_file_) << "Failed to open log file.";
+ VLOG(0) << ">> Output file " << file_name.value() << " has been created.";
+
+ // Write the array which contains delta times to a text file.
+ size_t elements_written = 0;
+ while (elements_written < elements_to_write_) {
+ fprintf(text_file_, "%d\n", delta_times_[elements_written]);
+ ++elements_written;
+ }
+ base::CloseFile(text_file_);
+ }
+
+ virtual int OnMoreData(AudioBus* dest,
+ AudioBuffersState buffers_state) {
+ NOTREACHED();
+ return 0;
+  }
+
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) {
+ // Store time between this callback and the previous callback.
+ const base::TimeTicks now_time = base::TimeTicks::Now();
+ const int diff = (now_time - previous_call_time_).InMilliseconds();
+ previous_call_time_ = now_time;
+ if (elements_to_write_ < kMaxDeltaSamples) {
+ delta_times_[elements_to_write_] = diff;
+ ++elements_to_write_;
+ }
+
+    // Play out the recorded audio samples in loopback. Perform channel mixing
+ // if required using a channel mixer which is created only if needed.
+ if (source->channels() == dest->channels()) {
+ source->CopyTo(dest);
+ } else {
+ // A channel mixer is required for converting audio between two different
+ // channel layouts.
+ if (!channel_mixer_) {
+ // Guessing the channel layout will work OK for this unit test.
+ // Main thing is that the number of channels is correct.
+ ChannelLayout input_layout = GuessChannelLayout(source->channels());
+ ChannelLayout output_layout = GuessChannelLayout(dest->channels());
+ channel_mixer_.reset(new ChannelMixer(input_layout, output_layout));
+ DVLOG(1) << "Remixing channel layout from " << input_layout
+ << " to " << output_layout << "; from "
+ << source->channels() << " channels to "
+ << dest->channels() << " channels.";
+ }
+ if (channel_mixer_)
+ channel_mixer_->Transform(source, dest);
+ }
+ return source->frames();
+  }
+
+ virtual void OnError(AudioOutputStream* stream) {
+ NOTREACHED();
+ }
+
+ private:
+ base::TimeTicks previous_call_time_;
+ scoped_ptr<int[]> delta_times_;
+ FILE* text_file_;
+ size_t elements_to_write_;
+ scoped_ptr<ChannelMixer> channel_mixer_;
+};
+
+// Convenience method which ensures that we fulfill all required conditions
+// to run unified audio tests on Windows.
+static bool CanRunUnifiedAudioTests(AudioManager* audio_man) {
+ if (!CoreAudioUtil::IsSupported()) {
+ LOG(WARNING) << "This tests requires Windows Vista or higher.";
+ return false;
+ }
+
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output devices detected.";
+ return false;
+ }
+
+ if (!audio_man->HasAudioInputDevices()) {
+ LOG(WARNING) << "No input devices detected.";
+ return false;
+ }
+
+ return true;
+}
+
+// Convenience class which simplifies creation of a unified AudioOutputStream
+// object.
+class AudioUnifiedStreamWrapper {
+ public:
+ explicit AudioUnifiedStreamWrapper(AudioManager* audio_manager)
+ : com_init_(ScopedCOMInitializer::kMTA),
+ audio_man_(audio_manager) {
+    // We open up both sides (input and output) using the preferred
+    // set of audio parameters. These parameters correspond to the mix format
+ // that the audio engine uses internally for processing of shared-mode
+ // output streams.
+ AudioParameters out_params;
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
+ eRender, eConsole, &out_params)));
+
+ // WebAudio is the only real user of unified audio and it always asks
+ // for stereo.
+ // TODO(henrika): extend support to other input channel layouts as well.
+ const int kInputChannels = 2;
+
+ params_.Reset(out_params.format(),
+ out_params.channel_layout(),
+ out_params.channels(),
+ kInputChannels,
+ out_params.sample_rate(),
+ out_params.bits_per_sample(),
+ out_params.frames_per_buffer());
+ }
+
+ ~AudioUnifiedStreamWrapper() {}
+
+ // Creates an AudioOutputStream object using default parameters.
+ WASAPIUnifiedStream* Create() {
+ return static_cast<WASAPIUnifiedStream*>(CreateOutputStream());
+ }
+
+ // Creates an AudioOutputStream object using default parameters but a
+ // specified input device.
+  WASAPIUnifiedStream* Create(const std::string& device_id) {
+ return static_cast<WASAPIUnifiedStream*>(CreateOutputStream(device_id));
+ }
+
+ AudioParameters::Format format() const { return params_.format(); }
+ int channels() const { return params_.channels(); }
+ int bits_per_sample() const { return params_.bits_per_sample(); }
+ int sample_rate() const { return params_.sample_rate(); }
+ int frames_per_buffer() const { return params_.frames_per_buffer(); }
+ int bytes_per_buffer() const { return params_.GetBytesPerBuffer(); }
+ int input_channels() const { return params_.input_channels(); }
+
+ private:
+ AudioOutputStream* CreateOutputStream() {
+ // Get the unique device ID of the default capture device instead of using
+ // AudioManagerBase::kDefaultDeviceId since it provides slightly better
+    // test coverage and will utilize the same code path as if a non-default
+    // input device were used.
+ ScopedComPtr<IMMDevice> audio_device =
+ CoreAudioUtil::CreateDefaultDevice(eCapture, eConsole);
+ AudioDeviceName name;
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device, &name)));
+ const std::string& input_device_id = name.unique_id;
+ EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole,
+ input_device_id));
+
+ // Create the unified audio I/O stream using the default input device.
+ AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
+ "", input_device_id);
+ EXPECT_TRUE(aos);
+ return aos;
+ }
+
+ AudioOutputStream* CreateOutputStream(const std::string& input_device_id) {
+ // Create the unified audio I/O stream using the specified input device.
+ AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
+ "", input_device_id);
+ EXPECT_TRUE(aos);
+ return aos;
+ }
+
+ ScopedCOMInitializer com_init_;
+ AudioManager* audio_man_;
+ AudioParameters params_;
+};
+
+// Convenience method which creates a default WASAPIUnifiedStream object.
+static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
+ AudioManager* audio_manager) {
+ AudioUnifiedStreamWrapper aosw(audio_manager);
+ return aosw.Create();
+}
+
+// Convenience method which creates a default WASAPIUnifiedStream object but
+// with a specified audio input device.
+static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
+ AudioManager* audio_manager, const std::string& device_id) {
+ AudioUnifiedStreamWrapper aosw(audio_manager);
+ return aosw.Create(device_id);
+}
+
+// Test Open(), Close() calling sequence.
+TEST(WASAPIUnifiedStreamTest, OpenAndClose) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
+ if (!CanRunUnifiedAudioTests(audio_manager.get()))
+ return;
+
+ WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
+ EXPECT_TRUE(wus->Open());
+ wus->Close();
+}
+
+// Test Open(), Close() calling sequence for all available capture devices.
+TEST(WASAPIUnifiedStreamTest, OpenAndCloseForAllInputDevices) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
+ if (!CanRunUnifiedAudioTests(audio_manager.get()))
+ return;
+
+ AudioDeviceNames device_names;
+ audio_manager->GetAudioInputDeviceNames(&device_names);
+ for (AudioDeviceNames::iterator i = device_names.begin();
+ i != device_names.end(); ++i) {
+ WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(
+ audio_manager.get(), i->unique_id);
+ EXPECT_TRUE(wus->Open());
+ wus->Close();
+ }
+}
+
+// Test Open(), Start(), Close() calling sequence.
+TEST(WASAPIUnifiedStreamTest, OpenStartAndClose) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
+ if (!CanRunUnifiedAudioTests(audio_manager.get()))
+ return;
+
+ MockAudioSourceCallback source;
+ AudioUnifiedStreamWrapper ausw(audio_manager.get());
+ WASAPIUnifiedStream* wus = ausw.Create();
+
+ EXPECT_TRUE(wus->Open());
+ EXPECT_CALL(source, OnError(wus))
+ .Times(0);
+ EXPECT_CALL(source, OnMoreIOData(NotNull(), NotNull(), _))
+ .Times(Between(0, 1))
+ .WillOnce(Return(ausw.frames_per_buffer()));
+ wus->Start(&source);
+ wus->Close();
+}
+
+// Verify that IO callbacks start as they should.
+TEST(WASAPIUnifiedStreamTest, StartLoopbackAudio) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
+ if (!CanRunUnifiedAudioTests(audio_manager.get()))
+ return;
+
+ base::MessageLoopForUI loop;
+ MockAudioSourceCallback source;
+ AudioUnifiedStreamWrapper ausw(audio_manager.get());
+ WASAPIUnifiedStream* wus = ausw.Create();
+
+  // Set up expected minimum delay estimation where we use a minimum delay
+ // which is equal to the sum of render and capture sizes. We can never
+ // reach a delay lower than this value.
+ AudioBuffersState min_total_audio_delay(0, 2 * ausw.bytes_per_buffer());
+
+ EXPECT_TRUE(wus->Open());
+ EXPECT_CALL(source, OnError(wus))
+ .Times(0);
+ EXPECT_CALL(source, OnMoreIOData(
+ NotNull(), NotNull(), DelayGreaterThan(min_total_audio_delay)))
+ .Times(AtLeast(2))
+ .WillOnce(Return(ausw.frames_per_buffer()))
+ .WillOnce(DoAll(
+ QuitLoop(loop.message_loop_proxy()),
+ Return(ausw.frames_per_buffer())));
+ wus->Start(&source);
+ loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
+ TestTimeouts::action_timeout());
+ loop.Run();
+ wus->Stop();
+ wus->Close();
+}
+
+// Perform a real-time test in loopback where the recorded audio is echoed
+// back to the speaker. This test allows the user to verify that the audio
+// sounds OK. A text file with name |kDeltaTimeMsFileName| is also generated.
+TEST(WASAPIUnifiedStreamTest, DISABLED_RealTimePlayThrough) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
+ if (!CanRunUnifiedAudioTests(audio_manager.get()))
+ return;
+
+ base::MessageLoopForUI loop;
+ UnifiedSourceCallback source;
+ WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
+
+ EXPECT_TRUE(wus->Open());
+ wus->Start(&source);
+ loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
+ base::TimeDelta::FromMilliseconds(10000));
+ loop.Run();
+ wus->Close();
+}
+
+} // namespace media