diff options
-rw-r--r--  content/renderer/media/webrtc_audio_capturer.cc    | 28
-rw-r--r--  content/renderer/media/webrtc_audio_device_impl.cc |  8
-rw-r--r--  content/renderer/media/webrtc_audio_renderer.cc    | 24
3 files changed, 12 insertions(+), 48 deletions(-)
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc index d59ed32..47639b7 100644 --- a/content/renderer/media/webrtc_audio_capturer.cc +++ b/content/renderer/media/webrtc_audio_capturer.cc @@ -35,34 +35,17 @@ static int kValidInputRates[] = {44100}; static int GetBufferSizeForSampleRate(int sample_rate) { int buffer_size = 0; #if defined(OS_WIN) || defined(OS_MACOSX) - // Use different buffer sizes depending on the current hardware sample rate. - if (sample_rate == 44100) { - // We do run at 44.1kHz at the actual audio layer, but ask for frames - // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. - buffer_size = 440; - } else { - buffer_size = (sample_rate / 100); - DCHECK_EQ(buffer_size * 100, sample_rate) << - "Sample rate not supported"; - } + // Use a buffer size of 10ms. + buffer_size = (sample_rate / 100); #elif defined(OS_LINUX) || defined(OS_OPENBSD) // Based on tests using the current ALSA implementation in Chrome, we have // found that the best combination is 20ms on the input side and 10ms on the // output side. - // TODO(henrika): It might be possible to reduce the input buffer - // size and reduce the delay even more. - if (sample_rate == 44100) - buffer_size = 2 * 440; - else - buffer_size = 2 * sample_rate / 100; + buffer_size = 2 * sample_rate / 100; #elif defined(OS_ANDROID) // TODO(leozwang): Tune and adjust buffer size on Android. 
- if (sample_rate == 44100) - buffer_size = 2 * 440; - else buffer_size = 2 * sample_rate / 100; #endif - return buffer_size; } @@ -76,10 +59,7 @@ class WebRtcAudioCapturer::ConfiguredBuffer : bool Initialize(int sample_rate, media::ChannelLayout channel_layout) { int buffer_size = GetBufferSizeForSampleRate(sample_rate); - if (!buffer_size) { - DLOG(ERROR) << "Unsupported sample-rate: " << sample_rate; - return false; - } + DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size; media::AudioParameters::Format format = media::AudioParameters::AUDIO_PCM_LOW_LATENCY; diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc index cf4a644..d6588fb 100644 --- a/content/renderer/media/webrtc_audio_device_impl.cc +++ b/content/renderer/media/webrtc_audio_device_impl.cc @@ -101,10 +101,6 @@ void WebRtcAudioDeviceImpl::CaptureData(const int16* audio_data, uint32_t new_mic_level = 0; int samples_per_sec = input_sample_rate(); - if (samples_per_sec == 44100) { - // Even if the hardware runs at 44.1kHz, we use 44.0 internally. - samples_per_sec = 44000; - } const int samples_per_10_msec = (samples_per_sec / 100); int bytes_per_sample = input_audio_parameters.bits_per_sample() / 8; const int bytes_per_10_msec = @@ -171,10 +167,6 @@ void WebRtcAudioDeviceImpl::RenderData(uint8* audio_data, DCHECK_LE(channels, output_channels()); int samples_per_sec = output_sample_rate(); - if (samples_per_sec == 44100) { - // Even if the hardware runs at 44.1kHz, we use 44.0 internally. 
- samples_per_sec = 44000; - } int samples_per_10_msec = (samples_per_sec / 100); int bytes_per_sample = output_audio_parameters_.bits_per_sample() / 8; const int bytes_per_10_msec = diff --git a/content/renderer/media/webrtc_audio_renderer.cc b/content/renderer/media/webrtc_audio_renderer.cc index 5b8d3d2..c19bd1c 100644 --- a/content/renderer/media/webrtc_audio_renderer.cc +++ b/content/renderer/media/webrtc_audio_renderer.cc @@ -46,7 +46,7 @@ const int kValidOutputRates[] = {44100}; enum AudioFramesPerBuffer { k160, k320, - k440, // WebRTC works internally with 440 audio frames at 44.1kHz. + k440, k480, k640, k880, @@ -58,11 +58,14 @@ enum AudioFramesPerBuffer { // Helper method to convert integral values to their respective enum values // above, or kUnexpectedAudioBufferSize if no match exists. +// We map 441 to k440 to avoid changes in the XML part for histograms. +// It is still possible to map the histogram result to the actual buffer size. +// See http://crbug.com/243450 for details. AudioFramesPerBuffer AsAudioFramesPerBuffer(int frames_per_buffer) { switch (frames_per_buffer) { case 160: return k160; case 320: return k320; - case 440: return k440; + case 441: return k440; case 480: return k480; case 640: return k640; case 880: return k880; @@ -147,23 +150,12 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) { } // Set up audio parameters for the source, i.e., the WebRTC client. + // The WebRTC client only supports multiples of 10ms as buffer size where // 10ms is preferred for lowest possible delay. - media::AudioParameters source_params; - int buffer_size = 0; - - if (sample_rate % 8000 == 0) { - buffer_size = (sample_rate / 100); - } else if (sample_rate == 44100) { - // The resampler in WebRTC does not support 441 as input. We hard code - // the size to 440 (~0.9977ms) instead and rely on the internal jitter - // buffer in WebRTC to deal with the resulting drift. 
- // TODO(henrika): ensure that WebRTC supports 44100Hz and use 441 instead. - buffer_size = 440; - } else { - return false; - } + int buffer_size = (sample_rate / 100); + DVLOG(1) << "Using WebRTC output buffer size: " << buffer_size; int channels = ChannelLayoutToChannelCount(channel_layout); source_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |