 content/renderer/media/webrtc_audio_device_impl.cc | 132
 content/renderer/media/webrtc_audio_device_impl.h  |   7
 media/audio/sample_rates.cc                        |  23
 media/audio/sample_rates.h                         |  32
 media/base/audio_decoder_config.cc                 |  43
 media/media.gyp                                    |   2
 6 files changed, 195 insertions(+), 44 deletions(-)
diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc
index a215eb4..616e263 100644
--- a/content/renderer/media/webrtc_audio_device_impl.cc
+++ b/content/renderer/media/webrtc_audio_device_impl.cc
@@ -5,11 +5,14 @@
 #include "content/renderer/media/webrtc_audio_device_impl.h"
 
 #include "base/bind.h"
+#include "base/metrics/histogram.h"
 #include "base/string_util.h"
 #include "base/win/windows_version.h"
 #include "content/renderer/media/audio_hardware.h"
 #include "content/renderer/render_thread_impl.h"
 #include "media/audio/audio_util.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/sample_rates.h"
 
 using media::AudioParameters;
 
@@ -31,6 +34,94 @@ static int kValidInputRates[] = {48000};
 static int kValidOutputRates[] = {48000};
 #endif
 
+namespace {
+
+// Helper enum used for histogramming buffer sizes expressed in number of
+// audio frames. This enumerator covers all supported sizes for all platforms.
+// Example: k480 <=> 480 audio frames <=> 10ms@48kHz.
+// TODO(henrika): can be moved to the media namespace if more clients need it.
+enum AudioFramesPerBuffer {
+  k160,
+  k320,
+  k440,  // WebRTC works internally with 440 audio frames at 44.1kHz.
+  k480,
+  k640,
+  k880,
+  k960,
+  k1440,
+  k1920,
+  kUnexpectedAudioBufferSize  // Must always be last!
+};
+
+enum HistogramDirection {
+  kAudioOutput,
+  kAudioInput
+};
+
+}  // anonymous namespace
+
+// Helper method to convert integral values to their respective enum values
+// above, or kUnexpectedAudioBufferSize if no match exists.
+static AudioFramesPerBuffer AsAudioFramesPerBuffer(int frames_per_buffer) {
+  switch (frames_per_buffer) {
+    case 160: return k160;
+    case 320: return k320;
+    case 440: return k440;
+    case 480: return k480;
+    case 640: return k640;
+    case 880: return k880;
+    case 960: return k960;
+    case 1440: return k1440;
+    case 1920: return k1920;
+  }
+  return kUnexpectedAudioBufferSize;
+}
+
+// Helper method which adds histogram data to be uploaded as part of an
+// UMA logging event. Names: "WebRTC.Audio[Output|Input]SampleRate".
+static void AddHistogramSampleRate(HistogramDirection dir, int param) {
+  media::AudioSampleRate asr = media::AsAudioSampleRate(param);
+  if (asr != media::kUnexpectedAudioSampleRate) {
+    if (dir == kAudioOutput) {
+      UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputSampleRate",
+                                asr, media::kUnexpectedAudioSampleRate);
+    } else {
+      UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputSampleRate",
+                                asr, media::kUnexpectedAudioSampleRate);
+    }
+  } else {
+    // Report unexpected sample rates using a unique histogram name.
+    if (dir == kAudioOutput) {
+      UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", param);
+    } else {
+      UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", param);
+    }
+  }
+}
+
+// Helper method which adds histogram data to be uploaded as part of an
+// UMA logging event. Names: "WebRTC.Audio[Output|Input]FramesPerBuffer".
+static void AddHistogramFramesPerBuffer(HistogramDirection dir, int param) {
+  AudioFramesPerBuffer afpb = AsAudioFramesPerBuffer(param);
+  if (afpb != kUnexpectedAudioBufferSize) {
+    if (dir == kAudioOutput) {
+      UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer",
+                                afpb, kUnexpectedAudioBufferSize);
+    } else {
+      UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputFramesPerBuffer",
+                                afpb, kUnexpectedAudioBufferSize);
+    }
+  } else {
+    // Report unexpected buffer sizes using a unique histogram name.
+    if (dir == kAudioOutput) {
+      UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputFramesPerBufferUnexpected",
+                           param);
+    } else {
+      UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputFramesPerBufferUnexpected", param);
+    }
+  }
+}
+
 WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()
     : ref_count_(0),
       render_loop_(base::MessageLoopProxy::current()),
@@ -327,6 +418,7 @@ int32_t WebRtcAudioDeviceImpl::Init() {
   // This request is based on a synchronous IPC message.
   int out_sample_rate = audio_hardware::GetOutputSampleRate();
   DVLOG(1) << "Audio output hardware sample rate: " << out_sample_rate;
+  AddHistogramSampleRate(kAudioOutput, out_sample_rate);
 
   // Verify that the reported output hardware sample rate is supported
   // on the current platform.
@@ -342,6 +434,7 @@ int32_t WebRtcAudioDeviceImpl::Init() {
   // This request is based on a synchronous IPC message.
   int in_sample_rate = audio_hardware::GetInputSampleRate();
   DVLOG(1) << "Audio input hardware sample rate: " << in_sample_rate;
+  AddHistogramSampleRate(kAudioInput, in_sample_rate);
 
   // Verify that the reported input hardware sample rate is supported
   // on the current platform.
@@ -355,11 +448,10 @@ int32_t WebRtcAudioDeviceImpl::Init() {
 
   // Ask the browser for the default number of audio input channels.
   // This request is based on a synchronous IPC message.
-  ChannelLayout input_channel_layout =
-      audio_hardware::GetInputChannelLayout();
-  DVLOG(1) << "Audio input hardware channels: " << input_channel_layout;
-
+  ChannelLayout in_channel_layout = audio_hardware::GetInputChannelLayout();
+  DVLOG(1) << "Audio input hardware channels: " << in_channel_layout;
   ChannelLayout out_channel_layout = CHANNEL_LAYOUT_MONO;
+
   AudioParameters::Format in_format = AudioParameters::AUDIO_PCM_LINEAR;
   int in_buffer_size = 0;
   int out_buffer_size = 0;
@@ -451,7 +543,7 @@ int32_t WebRtcAudioDeviceImpl::Init() {
   }
   // Linux
 #elif defined(OS_LINUX) || defined(OS_OPENBSD)
-  input_channel_layout = CHANNEL_LAYOUT_STEREO;
+  in_channel_layout = CHANNEL_LAYOUT_STEREO;
   out_channel_layout = CHANNEL_LAYOUT_MONO;
 
   // Based on tests using the current ALSA implementation in Chrome, we have
@@ -473,13 +565,20 @@ int32_t WebRtcAudioDeviceImpl::Init() {
       out_sample_rate, 16, out_buffer_size);
 
   input_audio_parameters_.Reset(
-      in_format, input_channel_layout, in_sample_rate,
+      in_format, in_channel_layout, in_sample_rate,
       16, in_buffer_size);
 
   // Create and configure the audio capturing client.
   audio_input_device_ = new AudioInputDevice(
       input_audio_parameters_, this, this);
 
+  UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout",
+                            out_channel_layout, CHANNEL_LAYOUT_MAX);
+  UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
+                            in_channel_layout, CHANNEL_LAYOUT_MAX);
+  AddHistogramFramesPerBuffer(kAudioOutput, out_buffer_size);
+  AddHistogramFramesPerBuffer(kAudioInput, in_buffer_size);
+
   // Create and configure the audio rendering client.
   audio_output_device_ = new AudioDevice(output_audio_parameters_, this);
@@ -632,11 +731,15 @@ int32_t WebRtcAudioDeviceImpl::StartPlayout() {
   if (!audio_transport_callback_) {
     return -1;
   }
+
   if (playing_) {
     // webrtc::VoiceEngine assumes that it is OK to call Start() twice and
     // that the call is ignored the second time.
     return 0;
   }
+
+  start_render_time_ = base::Time::Now();
+
   audio_output_device_->Start();
   playing_ = true;
   return 0;
@@ -648,6 +751,14 @@ int32_t WebRtcAudioDeviceImpl::StopPlayout() {
     // webrtc::VoiceEngine assumes that it is OK to call Stop() just in case.
     return 0;
   }
+
+  // Add histogram data to be uploaded as part of an UMA logging event.
+  // This histogram keeps track of total playout times.
+  if (!start_render_time_.is_null()) {
+    base::TimeDelta render_time = base::Time::Now() - start_render_time_;
+    UMA_HISTOGRAM_LONG_TIMES("WebRTC.AudioRenderTime", render_time);
+  }
+
   audio_output_device_->Stop();
   playing_ = false;
   return 0;
@@ -678,6 +789,8 @@ int32_t WebRtcAudioDeviceImpl::StartRecording() {
     return 0;
   }
 
+  start_capture_time_ = base::Time::Now();
+
   // Specify the session_id which is mapped to a certain device.
   audio_input_device_->SetDevice(session_id_);
   audio_input_device_->Start();
@@ -696,6 +809,13 @@ int32_t WebRtcAudioDeviceImpl::StopRecording() {
     }
   }
 
+  // Add histogram data to be uploaded as part of an UMA logging event.
+  // This histogram keeps track of total recording times.
+  if (!start_capture_time_.is_null()) {
+    base::TimeDelta capture_time = base::Time::Now() - start_capture_time_;
+    UMA_HISTOGRAM_LONG_TIMES("WebRTC.AudioCaptureTime", capture_time);
+  }
+
   audio_input_device_->Stop();
 
   base::AutoLock auto_lock(lock_);
diff --git a/content/renderer/media/webrtc_audio_device_impl.h b/content/renderer/media/webrtc_audio_device_impl.h
index 5d948ca..6d4442a 100644
--- a/content/renderer/media/webrtc_audio_device_impl.h
+++ b/content/renderer/media/webrtc_audio_device_impl.h
@@ -128,8 +128,7 @@
 // The adaptive analog mode of the AGC is always enabled for desktop platforms
 // in WebRTC.
 //
-// Before recording starts, the ADM sets an AGC state in the
-// AudioInputDevice by calling AudioInputDevice::SetAutomaticGainControl(true).
+// Before recording starts, the ADM enables AGC on the AudioInputDevice.
 //
 // A capture session with AGC is started up as follows (simplified):
 //
@@ -450,6 +449,10 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
   // Local copy of the current Automatic Gain Control state.
   bool agc_is_enabled_;
 
+  // Used for histograms of total recording and playout times.
+  base::Time start_capture_time_;
+  base::Time start_render_time_;
+
   DISALLOW_COPY_AND_ASSIGN(WebRtcAudioDeviceImpl);
 };
diff --git a/media/audio/sample_rates.cc b/media/audio/sample_rates.cc
new file mode 100644
index 0000000..4bb75a4
--- /dev/null
+++ b/media/audio/sample_rates.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/sample_rates.h"
+
+namespace media {
+
+AudioSampleRate AsAudioSampleRate(int sample_rate) {
+  switch (sample_rate) {
+    case 8000: return k8000Hz;
+    case 16000: return k16000Hz;
+    case 32000: return k32000Hz;
+    case 48000: return k48000Hz;
+    case 96000: return k96000Hz;
+    case 11025: return k11025Hz;
+    case 22050: return k22050Hz;
+    case 44100: return k44100Hz;
+  }
+  return kUnexpectedAudioSampleRate;
+}
+
+}  // namespace media
diff --git a/media/audio/sample_rates.h b/media/audio/sample_rates.h
new file mode 100644
index 0000000..9d47a13
--- /dev/null
+++ b/media/audio/sample_rates.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SAMPLE_RATES_H_
+#define MEDIA_AUDIO_SAMPLE_RATES_H_
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Enumeration used for histogramming sample rates into distinct buckets.
+enum AudioSampleRate {
+  // Do not change the order of these values.
+  k8000Hz = 0,
+  k16000Hz = 1,
+  k32000Hz = 2,
+  k48000Hz = 3,
+  k96000Hz = 4,
+  k11025Hz = 5,
+  k22050Hz = 6,
+  k44100Hz = 7,
+  kUnexpectedAudioSampleRate  // Must always be last!
+};
+
+// Helper method to convert integral values to their respective enum values,
+// or kUnexpectedAudioSampleRate if no match exists.
+MEDIA_EXPORT AudioSampleRate AsAudioSampleRate(int sample_rate);
+
+}  // namespace media
+
+#endif  // MEDIA_AUDIO_SAMPLE_RATES_H_
diff --git a/media/base/audio_decoder_config.cc b/media/base/audio_decoder_config.cc
index 38e0136..5d15e7f 100644
--- a/media/base/audio_decoder_config.cc
+++ b/media/base/audio_decoder_config.cc
@@ -6,6 +6,7 @@
 
 #include "base/logging.h"
 #include "base/metrics/histogram.h"
+#include "media/audio/sample_rates.h"
 #include "media/base/limits.h"
 
 namespace media {
@@ -28,36 +29,6 @@ AudioDecoderConfig::AudioDecoderConfig(AudioCodec codec,
              extra_data, extra_data_size, true);
 }
 
-// Helper enum used only for histogramming samples-per-second. Put
-// commonly-used rates here to get accurate reporting. Uncommon rates are
-// reported in a separate, bucketized, histogram.
-enum AudioSamplesPerSecond {
-  k8000Hz,
-  k16000Hz,
-  k32000Hz,
-  k48000Hz,
-  k96000Hz,
-  k11025Hz,
-  k22050Hz,
-  k44100Hz,
-  kUnexpected  // Must always be last!
-};
-
-// Helper method to convert integral values to their respective enum values
-// above, or kUnexpected if no match exists.
-static AudioSamplesPerSecond AsAudioSamplesPerSecond(int samples_per_second) {
-  switch (samples_per_second) {
-    case 8000: return k8000Hz;
-    case 16000: return k16000Hz;
-    case 32000: return k32000Hz;
-    case 48000: return k48000Hz;
-    case 11025: return k11025Hz;
-    case 22050: return k22050Hz;
-    case 44100: return k44100Hz;
-    default: return kUnexpected;
-  }
-}
-
 void AudioDecoderConfig::Initialize(AudioCodec codec,
                                     int bits_per_channel,
                                     ChannelLayout channel_layout,
@@ -73,12 +44,12 @@ void AudioDecoderConfig::Initialize(AudioCodec codec,
   // any values over 32 and even that is huge.
   UMA_HISTOGRAM_ENUMERATION("Media.AudioBitsPerChannel", bits_per_channel,
                             40);
-  UMA_HISTOGRAM_ENUMERATION(
-      "Media.AudioChannelLayout", channel_layout, CHANNEL_LAYOUT_MAX);
-  AudioSamplesPerSecond asps = AsAudioSamplesPerSecond(samples_per_second);
-  if (asps != kUnexpected) {
-    UMA_HISTOGRAM_ENUMERATION("Media.AudioSamplesPerSecond", asps,
-                              kUnexpected);
+  UMA_HISTOGRAM_ENUMERATION("Media.AudioChannelLayout", channel_layout,
+                            CHANNEL_LAYOUT_MAX);
+  AudioSampleRate asr = media::AsAudioSampleRate(samples_per_second);
+  if (asr != kUnexpectedAudioSampleRate) {
+    UMA_HISTOGRAM_ENUMERATION("Media.AudioSamplesPerSecond", asr,
+                              kUnexpectedAudioSampleRate);
   } else {
     UMA_HISTOGRAM_COUNTS(
         "Media.AudioSamplesPerSecondUnexpected", samples_per_second);
diff --git a/media/media.gyp b/media/media.gyp
index 3bd7cc4..673f51e 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -97,6 +97,8 @@
         'audio/null_audio_sink.h',
         'audio/pulse/pulse_output.cc',
         'audio/pulse/pulse_output.h',
+        'audio/sample_rates.cc',
+        'audio/sample_rates.h',
         'audio/simple_sources.cc',
         'audio/simple_sources.h',
         'audio/win/audio_low_latency_input_win.cc',
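
Note: the sketch below is not part of the change. It is a hypothetical caller (LogSampleRate() and the "Media.Example*" histogram names are made up for illustration) showing how the new media::AsAudioSampleRate() helper is meant to be combined with the UMA macros from base/metrics/histogram.h, mirroring AddHistogramSampleRate() above: known rates go into a fixed enumerated histogram, anything else into a separate counts histogram.

// Illustrative sketch only; LogSampleRate() and the histogram names are
// hypothetical and not part of this patch.
#include "base/metrics/histogram.h"
#include "media/audio/sample_rates.h"

static void LogSampleRate(int sample_rate) {
  media::AudioSampleRate asr = media::AsAudioSampleRate(sample_rate);
  if (asr != media::kUnexpectedAudioSampleRate) {
    // Expected rate: record the enum bucket, bounded by the sentinel value.
    UMA_HISTOGRAM_ENUMERATION("Media.ExampleSampleRate", asr,
                              media::kUnexpectedAudioSampleRate);
  } else {
    // Unexpected rate: record the raw value under a separate histogram name
    // so the enumerated histogram keeps only well-defined buckets.
    UMA_HISTOGRAM_COUNTS("Media.ExampleSampleRateUnexpected", sample_rate);
  }
}

AddHistogramFramesPerBuffer() applies the same two-histogram split to buffer sizes, and audio_decoder_config.cc now gets it by reusing the shared media::AudioSampleRate enum instead of its local AudioSamplesPerSecond copy.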