author     dalecurtis <dalecurtis@chromium.org>    2015-08-07 04:16:32 -0700
committer  Commit bot <commit-bot@chromium.org>    2015-08-07 11:17:09 +0000
commit     a7be0758c5f97679d0aed6b7f8c1f395d0d62d5c (patch)
tree       9d943e01cb09cdbfb3708fcea96c7b0b8f856c0a /media
parent     ff13864f73ac781d885f68f98dd8c7a63d384675 (diff)
Fix WASAPI restriction to be based on period size; fixes Win10.
The code was forcing the output buffer size to be an even divisor
of the endpoint buffer size; in seemingly 90%+ of cases this worked
prior to Windows 10. With Windows 10, the device period is no
longer an even divisor of the endpoint buffer size; e.g. my
local machine has a period of 512 and an endpoint buffer of 1126.
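To make those numbers concrete, here is a toy version of the old check (not Chromium code; it assumes the requested packet size tracked the 512-frame device period on that machine):

#include <iostream>

int main() {
  // Values quoted above for one Windows 10 machine; the packet size equaling
  // the device period is an assumption for illustration.
  const int endpoint_buffer_size_frames = 1126;
  const int packet_size_frames = 512;

  // Old restriction: bail out unless the packet size evenly divides the
  // endpoint buffer. 1126 % 512 == 102, so this machine failed to open.
  if (endpoint_buffer_size_frames % packet_size_frames != 0)
    std::cout << "old check fails: remainder "
              << endpoint_buffer_size_frames % packet_size_frames << "\n";
}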
In retrospect this restriction seems incorrect; instead it seems
like we just want to ensure that the buffer size is an even divisor
of the device period (which ultimately determines the callback
schedule). I've changed the code to log a warning when this is not
the case, since things will still work; we'll just get glitches. The
log will help us identify users who file bug reports.
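A minimal standalone sketch of the new period-based check (not the patch itself; the sample rate, period, and buffer size below are assumed example values). It converts the shared-mode device period from 100-ns REFERENCE_TIME units to frames the same way the patch does, then warns rather than bails when the requested buffer size is not an even divisor:

#include <cstdint>
#include <iostream>

using REFERENCE_TIME = int64_t;  // 100-nanosecond units, as in WASAPI.

// Frames per device period, rounded to nearest, as the patch computes it.
int PeriodInFrames(REFERENCE_TIME device_period, int sample_rate) {
  return static_cast<int>(sample_rate * (device_period / 1e7) + 0.5);
}

int main() {
  const REFERENCE_TIME device_period = 100000;  // assumed 10 ms period
  const int sample_rate = 48000;
  const int packet_size_frames = 480;           // assumed requested buffer

  const int preferred_frames_per_buffer =
      PeriodInFrames(device_period, sample_rate);  // 480 here

  if (preferred_frames_per_buffer % packet_size_frames != 0) {
    // The real code logs a warning and keeps running instead of failing.
    std::cout << "non-optimal buffer size; expect occasional glitches\n";
  } else {
    std::cout << "buffer size is an even divisor of the device period\n";
  }
}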
The code was also looping over the available endpoint buffer space,
which seems incorrect; we should fulfill only one period at a time to
avoid shared-memory induced glitches. The looping was originally added
to fix http://crbug.com/170498
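A toy model (not Chromium code) of that scheduling change: given the free space in the endpoint buffer at a render event, the old code wrote every complete packet that fit, while the new code writes at most one device period per callback. The example values match the Windows 10 numbers above:

#include <cstdint>
#include <iostream>

constexpr uint32_t kEndpointBufferFrames = 1126;  // assumed endpoint size
constexpr uint32_t kPacketSizeFrames = 512;       // assumed packet/period

// Old behavior: loop and fill every complete packet that fits.
uint32_t FramesWrittenOld(uint32_t padding_frames) {
  const uint32_t available = kEndpointBufferFrames - padding_frames;
  return (available / kPacketSizeFrames) * kPacketSizeFrames;
}

// New behavior: fill at most one packet, or nothing until a packet fits.
uint32_t FramesWrittenNew(uint32_t padding_frames) {
  const uint32_t available = kEndpointBufferFrames - padding_frames;
  return available < kPacketSizeFrames ? 0 : kPacketSizeFrames;
}

int main() {
  const uint32_t paddings[] = {0, 100, 700};
  for (uint32_t padding : paddings) {
    std::cout << "padding=" << padding
              << "  old writes " << FramesWrittenOld(padding)
              << "  new writes " << FramesWrittenNew(padding) << " frames\n";
  }
}

With zero padding the old loop writes two packets (1024 frames) back to back, which is exactly the shared-memory round-trip the paragraph above warns about.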
BUG=516196
TEST=Windows 10 audio works on two devices, Windows 7 works fine,
traces show that callbacks are regular in all cases. Confirmed that
http://crbug.com/170498 is not regressed on X-Fi.
Review URL: https://codereview.chromium.org/1276523004
Cr-Commit-Position: refs/heads/master@{#342334}
Diffstat (limited to 'media')
-rw-r--r--  media/audio/win/audio_low_latency_output_win.cc  175
1 file changed, 93 insertions, 82 deletions
diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc
index 83bac16..494d1b1 100644
--- a/media/audio/win/audio_low_latency_output_win.cc
+++ b/media/audio/win/audio_low_latency_output_win.cc
@@ -172,16 +172,44 @@ bool WASAPIAudioOutputStream::Open() {
     if (FAILED(hr))
       return false;
 
-    // We know from experience that the best possible callback sequence is
-    // achieved when the packet size (given by the native device period)
-    // is an even divisor of the endpoint buffer size.
+    REFERENCE_TIME device_period = 0;
+    if (FAILED(CoreAudioUtil::GetDevicePeriod(
+            audio_client.get(), AUDCLNT_SHAREMODE_SHARED, &device_period))) {
+      return false;
+    }
+
+    const int preferred_frames_per_buffer = static_cast<int>(
+        format_.Format.nSamplesPerSec *
+            CoreAudioUtil::RefererenceTimeToTimeDelta(device_period)
+                .InSecondsF() +
+        0.5);
+
+    // Packet size should always be an even divisor of the device period for
+    // best performance; things will still work otherwise, but may glitch for a
+    // couple of reasons.
+    //
+    // The first reason is if/when repeated RenderAudioFromSource() hit the
+    // shared memory boundary between the renderer and the browser. The next
+    // audio buffer is always requested after the current request is consumed.
+    // With back-to-back calls the round-trip may not be fast enough and thus
+    // audio will glitch as we fail to deliver audio in a timely manner.
+    //
+    // The second reason is event wakeup efficiency. We may have too few or too
+    // many frames to fill the output buffer requested by WASAPI. If too few,
+    // we'll refuse the render event and wait until more output space is
+    // available. If we have too many frames, we'll only partially fill and
+    // wait for the next render event. In either case certain remainders may
+    // leave us unable to fulfill the request in a timely manner, thus glitches.
+    //
+    // Log a warning in these cases so we can help users in the field.
     // Examples: 48kHz => 960 % 480, 44.1kHz => 896 % 448 or 882 % 441.
-    if (endpoint_buffer_size_frames_ % packet_size_frames_ != 0) {
-      LOG(ERROR)
-          << "Bailing out due to non-perfect timing. Buffer size of "
+    if (preferred_frames_per_buffer % packet_size_frames_) {
+      LOG(WARNING)
+          << "Using WASAPI output with a non-optimal buffer size, glitches from"
+          << " back to back shared memory reads and partial fills of WASAPI"
+          << " output buffers may occur. Buffer size of "
           << packet_size_frames_ << " is not an even divisor of "
-          << endpoint_buffer_size_frames_;
-      return false;
+          << preferred_frames_per_buffer;
     }
   } else {
     // TODO(henrika): break out to CoreAudioUtil::ExclusiveModeInitialize()
@@ -439,86 +467,69 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
   }
 
   // Check if there is enough available space to fit the packet size
-  // specified by the client.
+  // specified by the client, wait until a future callback.
   if (num_available_frames < packet_size_frames_)
     return true;
 
-  DLOG_IF(ERROR, num_available_frames % packet_size_frames_ != 0)
-      << "Non-perfect timing detected (num_available_frames="
-      << num_available_frames << ", packet_size_frames="
-      << packet_size_frames_ << ")";
-
-  // Derive the number of packets we need to get from the client to
-  // fill up the available area in the endpoint buffer.
-  // |num_packets| will always be one for exclusive-mode streams and
-  // will be one in most cases for shared mode streams as well.
-  // However, we have found that two packets can sometimes be
-  // required.
-  size_t num_packets = (num_available_frames / packet_size_frames_);
-
-  for (size_t n = 0; n < num_packets; ++n) {
-    // Grab all available space in the rendering endpoint buffer
-    // into which the client can write a data packet.
-    hr = audio_render_client_->GetBuffer(packet_size_frames_,
-                                         &audio_data);
-    if (FAILED(hr)) {
-      DLOG(ERROR) << "Failed to use rendering audio buffer: "
-                  << std::hex << hr;
-      return false;
-    }
-
-    // Derive the audio delay which corresponds to the delay between
-    // a render event and the time when the first audio sample in a
-    // packet is played out through the speaker. This delay value
-    // can typically be utilized by an acoustic echo-control (AEC)
-    // unit at the render side.
-    UINT64 position = 0;
-    uint32 audio_delay_bytes = 0;
-    hr = audio_clock_->GetPosition(&position, NULL);
-    if (SUCCEEDED(hr)) {
-      // Stream position of the sample that is currently playing
-      // through the speaker.
-      double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
-          (static_cast<double>(position) / device_frequency);
-
-      // Stream position of the last sample written to the endpoint
-      // buffer. Note that, the packet we are about to receive in
-      // the upcoming callback is also included.
-      size_t pos_last_sample_written_frames =
-          num_written_frames_ + packet_size_frames_;
-
-      // Derive the actual delay value which will be fed to the
-      // render client using the OnMoreData() callback.
-      audio_delay_bytes = (pos_last_sample_written_frames -
-          pos_sample_playing_frames) * format_.Format.nBlockAlign;
-    }
-
-    // Read a data packet from the registered client source and
-    // deliver a delay estimate in the same callback to the client.
-
-    int frames_filled = source_->OnMoreData(
-        audio_bus_.get(), audio_delay_bytes);
-    uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
-    DCHECK_LE(num_filled_bytes, packet_size_bytes_);
-
-    // Note: If this ever changes to output raw float the data must be
-    // clipped and sanitized since it may come from an untrusted
-    // source such as NaCl.
-    const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
-    audio_bus_->Scale(volume_);
-    audio_bus_->ToInterleaved(
-        frames_filled, bytes_per_sample, audio_data);
-
-
-    // Release the buffer space acquired in the GetBuffer() call.
-    // Render silence if we were not able to fill up the buffer totally.
-    DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
-        AUDCLNT_BUFFERFLAGS_SILENT : 0;
-    audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);
+  // Grab all available space in the rendering endpoint buffer
+  // into which the client can write a data packet.
+  hr = audio_render_client_->GetBuffer(packet_size_frames_,
+                                       &audio_data);
+  if (FAILED(hr)) {
+    DLOG(ERROR) << "Failed to use rendering audio buffer: "
+                << std::hex << hr;
+    return false;
+  }
 
-    num_written_frames_ += packet_size_frames_;
+  // Derive the audio delay which corresponds to the delay between
+  // a render event and the time when the first audio sample in a
+  // packet is played out through the speaker. This delay value
+  // can typically be utilized by an acoustic echo-control (AEC)
+  // unit at the render side.
+  UINT64 position = 0;
+  uint32 audio_delay_bytes = 0;
+  hr = audio_clock_->GetPosition(&position, NULL);
+  if (SUCCEEDED(hr)) {
+    // Stream position of the sample that is currently playing
+    // through the speaker.
+    double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
+        (static_cast<double>(position) / device_frequency);
+
+    // Stream position of the last sample written to the endpoint
+    // buffer. Note that, the packet we are about to receive in
+    // the upcoming callback is also included.
+    size_t pos_last_sample_written_frames =
+        num_written_frames_ + packet_size_frames_;
+
+    // Derive the actual delay value which will be fed to the
+    // render client using the OnMoreData() callback.
+    audio_delay_bytes = (pos_last_sample_written_frames -
+        pos_sample_playing_frames) * format_.Format.nBlockAlign;
   }
 
+  // Read a data packet from the registered client source and
+  // deliver a delay estimate in the same callback to the client.
+
+  int frames_filled = source_->OnMoreData(
+      audio_bus_.get(), audio_delay_bytes);
+  uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
+  DCHECK_LE(num_filled_bytes, packet_size_bytes_);
+
+  // Note: If this ever changes to output raw float the data must be
+  // clipped and sanitized since it may come from an untrusted
+  // source such as NaCl.
+  const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
+  audio_bus_->Scale(volume_);
+  audio_bus_->ToInterleaved(
+      frames_filled, bytes_per_sample, audio_data);
+
+  // Release the buffer space acquired in the GetBuffer() call.
+  // Render silence if we were not able to fill up the buffer totally.
+  DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
+      AUDCLNT_BUFFERFLAGS_SILENT : 0;
+  audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);
+
+  num_written_frames_ += packet_size_frames_;
   return true;
 }
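For reference, the delay estimate that RenderAudioFromSource() hands to OnMoreData() in the diff above can be reproduced in isolation. The sketch below is not Chromium code; the sample rate, positions, and the device frequency equaling the sample rate are assumptions chosen to keep the arithmetic easy to follow:

#include <cstdint>
#include <iostream>

int main() {
  const int sample_rate = 48000;       // format_.Format.nSamplesPerSec
  const int block_align = 4;           // nBlockAlign: 2 channels x 16 bits
  const int packet_size_frames = 480;  // one assumed device period
  const uint64_t num_written_frames = 4800;  // frames delivered so far

  // IAudioClock::GetPosition() reports progress in device-frequency units;
  // assume the device frequency equals the sample rate for simplicity.
  const uint64_t position = 4320;
  const double device_frequency = sample_rate;

  // Sample currently playing through the speaker, in frames.
  const double pos_sample_playing_frames =
      sample_rate * (static_cast<double>(position) / device_frequency);

  // Last sample that will have been written once this packet is delivered.
  const uint64_t pos_last_sample_written_frames =
      num_written_frames + packet_size_frames;

  // Delay reported to the render client, in bytes.
  const uint32_t audio_delay_bytes = static_cast<uint32_t>(
      (pos_last_sample_written_frames - pos_sample_playing_frames) *
      block_align);

  // (4800 + 480 - 4320) * 4 = 3840 bytes, i.e. 960 frames or 20 ms at 48 kHz.
  std::cout << "audio_delay_bytes = " << audio_delay_bytes << "\n";
}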