author     enal@chromium.org <enal@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-12-21 01:27:03 +0000
committer  enal@chromium.org <enal@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-12-21 01:27:03 +0000
commit     3c99d2c8a26c5b2297f36900c3e277360c1f120a (patch)
tree       04071b030e99ad7df94d83447d6b68fc3fda1f09 /content/renderer/media
parent     024bba94451a673bcaa61394e2130e4bdc0ac0df (diff)
Fix start/stop of HTML5 audio streams and a race condition when pausing.
(a) Update the earliest time at which HTML5 audio can end, fixing a problem
where the stream was being truncated too early.
(b) Set the length of the data in the buffer, so the polling code used for the
first several buffers can stop waiting the moment the data is ready.
(c) Fix zeroing out the end of a partially filled buffer -- the code was zeroing
the correct number of bytes, but at the beginning of the buffer, not at the end.
Many thanks to Shijing for identifying the issue; I heard that something was
wrong but could not figure out the exact cause. (A sketch of the corrected
zeroing follows this list.)
(d) Zero the length of the data in the buffer when AudioDevice receives the
'pause' command, working around a race condition where one thread pauses
playback while another simultaneously asks for more data -- depending on the
exact timing, the buffer could be left in an unpredictable state, resulting in
the next call for data getting an extra buffer of silence (not fatal, but very
noticeable when repeatedly playing a short audio stream).
(e) Remove pre-rendering of the first buffer; it is not necessary for <audio>,
WebAudio (confirmed by Chris), or WebRTC (confirmed by Shijing).
The WebRTC/WebAudio changes are actually a no-op, as their renderers now just
return 'I've filled the entire buffer'.
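To make the buffer handling above concrete, here is a minimal, hypothetical sketch of a renderer-side Render() callback under the new contract. The class and the FillFromSource() helper are illustrative only, not the actual Chromium code in the diff below: the callback fills what the source provides, zeroes the tail of each channel starting at the filled offset (rather than at the start of the buffer, which was the bug in (c)), and returns the number of frames actually filled so the host can record it, per (b) and (e).

  // Hypothetical example; the real interface is AudioDevice::RenderCallback.
  #include <cstddef>
  #include <cstring>
  #include <vector>

  class ExampleRenderCallback {
   public:
    // Fills |number_of_frames| frames of planar float data and returns how
    // many frames actually came from the source.
    size_t Render(const std::vector<float*>& audio_data,
                  size_t number_of_frames) {
      size_t filled_frames = FillFromSource(audio_data, number_of_frames);

      // Zero out the *end* of each channel buffer, starting at
      // |filled_frames|. The pre-fix code zeroed the same number of samples
      // but from offset 0, clobbering valid audio at the start of the buffer.
      for (size_t ch = 0; ch < audio_data.size(); ++ch) {
        size_t frames_to_zero = number_of_frames - filled_frames;
        std::memset(audio_data[ch] + filled_frames, 0,
                    sizeof(float) * frames_to_zero);
      }
      // The host stores this count at the end of the shared-memory buffer.
      return filled_frames;
    }

   private:
    // Placeholder: reports that only half the requested frames were
    // available, as at the tail of a short stream. A real implementation
    // would copy decoded samples into |audio_data| and return that count.
    size_t FillFromSource(const std::vector<float*>& audio_data,
                          size_t number_of_frames) {
      (void)audio_data;
      return number_of_frames / 2;
    }
  };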
Note: this CL fixes HTML5 audio playback of short streams. It is possible to
further improve the code, and maybe get rid of storing the length of the data in
the buffer, by changing the way AudioRendererBase::FillBuffer() signals the end
of playback. Right now that signal is synchronous, delivered only when the host
asks for more data; we could probably make it asynchronous by issuing a delayed
task onto the child process IO message loop, but that should be a separate CL.
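As a rough illustration of the asynchronous idea in that note (the names below are generic placeholders, not Chromium's message-loop API): instead of detecting the end of playback only when the host pulls more data, the renderer could schedule a one-shot delayed task for the moment the final, possibly partial, buffer must have finished playing.

  #include <chrono>
  #include <functional>
  #include <thread>
  #include <utility>

  // Hypothetical helper: runs |task| after |delay| on a detached thread. In
  // the real renderer this would be a delayed task posted to the child
  // process IO message loop.
  void PostDelayedTask(std::function<void()> task,
                       std::chrono::milliseconds delay) {
    std::thread([task, delay] {
      std::this_thread::sleep_for(delay);
      task();
    }).detach();
  }

  // Called once the final (possibly partial) buffer has been handed to the
  // host. |remaining_audio| covers the unplayed frames plus the reported
  // hardware delay, so the callback fires only after the audio has played out.
  void ScheduleEndOfPlayback(std::chrono::milliseconds remaining_audio,
                             std::function<void()> signal_playback_ended) {
    PostDelayedTask(std::move(signal_playback_ended), remaining_audio);
  }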
TEST=Please load the page http://www.corp.google.com/~tommi/average.html and listen.
TEST=I'll add a layout test that verifies the timing is reasonable, but it will
TEST=take some time to add.
Review URL: http://codereview.chromium.org/8909006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115253 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content/renderer/media')
-rw-r--r--  content/renderer/media/audio_device.cc             | 23
-rw-r--r--  content/renderer/media/audio_device.h              | 21
-rw-r--r--  content/renderer/media/audio_renderer_impl.cc      | 14
-rw-r--r--  content/renderer/media/audio_renderer_impl.h       |  6
-rw-r--r--  content/renderer/media/webrtc_audio_device_impl.cc |  3
-rw-r--r--  content/renderer/media/webrtc_audio_device_impl.h  |  7
6 files changed, 48 insertions, 26 deletions
diff --git a/content/renderer/media/audio_device.cc b/content/renderer/media/audio_device.cc
index 03ff1be..84ffcca 100644
--- a/content/renderer/media/audio_device.cc
+++ b/content/renderer/media/audio_device.cc
@@ -287,10 +287,7 @@ void AudioDevice::Run() {
   audio_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
 
   base::SharedMemory shared_memory(shared_memory_handle_, false);
-  shared_memory.Map(memory_length_);
-  // Allow the client to pre-populate the buffer.
-  FireRenderCallback(reinterpret_cast<int16*>(shared_memory.memory()));
-
+  shared_memory.Map(media::TotalSharedMemorySizeInBytes(memory_length_));
 
   base::SyncSocket socket(socket_handle_);
   int pending_data;
@@ -301,23 +298,34 @@ void AudioDevice::Run() {
          socket.Receive(&pending_data, sizeof(pending_data))) {
     if (pending_data == media::AudioOutputController::kPauseMark) {
       memset(shared_memory.memory(), 0, memory_length_);
+      media::SetActualDataSizeInBytes(&shared_memory, memory_length_, 0);
       continue;
     } else if (pending_data < 0) {
       break;
     }
+
     // Convert the number of pending bytes in the render buffer
     // into milliseconds.
     audio_delay_milliseconds_ = pending_data / bytes_per_ms;
-    FireRenderCallback(reinterpret_cast<int16*>(shared_memory.memory()));
+    size_t num_frames = FireRenderCallback(
+        reinterpret_cast<int16*>(shared_memory.memory()));
+
+    // Let the host know we are done.
+    media::SetActualDataSizeInBytes(&shared_memory,
+                                    memory_length_,
+                                    num_frames * channels_ * sizeof(int16));
   }
 }
 
-void AudioDevice::FireRenderCallback(int16* data) {
+size_t AudioDevice::FireRenderCallback(int16* data) {
   TRACE_EVENT0("audio", "AudioDevice::FireRenderCallback");
 
+  size_t num_frames = 0;
   if (callback_) {
     // Update the audio-delay measurement then ask client to render audio.
-    callback_->Render(audio_data_, buffer_size_, audio_delay_milliseconds_);
+    num_frames = callback_->Render(audio_data_,
+                                   buffer_size_,
+                                   audio_delay_milliseconds_);
 
     // Interleave, scale, and clip to int16.
     // TODO(crogers): avoid converting to integer here, and pass the data
@@ -327,6 +335,7 @@ void AudioDevice::FireRenderCallback(int16* data) {
                                   data,
                                   buffer_size_);
   }
+  return num_frames;
 }
 
 void AudioDevice::ShutDownAudioThread() {
diff --git a/content/renderer/media/audio_device.h b/content/renderer/media/audio_device.h
index 000593a..aba52a3 100644
--- a/content/renderer/media/audio_device.h
+++ b/content/renderer/media/audio_device.h
@@ -81,9 +81,13 @@ class CONTENT_EXPORT AudioDevice
  public:
   class CONTENT_EXPORT RenderCallback {
    public:
-    virtual void Render(const std::vector<float*>& audio_data,
-                        size_t number_of_frames,
-                        size_t audio_delay_milliseconds) = 0;
+    // Fills entire buffer of length |number_of_frames| but returns actual
+    // number of frames it got from its source (|number_of_frames| in case of
+    // continuous stream). That actual number of frames is passed to host
+    // together with PCM audio data and host is free to use or ignore it.
+    virtual size_t Render(const std::vector<float*>& audio_data,
+                          size_t number_of_frames,
+                          size_t audio_delay_milliseconds) = 0;
   protected:
    virtual ~RenderCallback() {}
  };
@@ -159,10 +163,13 @@ class CONTENT_EXPORT AudioDevice
   void Send(IPC::Message* message);
 
-  // Method called on the audio thread (+ one call on the IO thread) ----------
-  // Calls the client's callback for rendering audio. There will also be one
-  // initial call on the IO thread before the audio thread has been created.
-  void FireRenderCallback(int16* data);
+  // Method called on the audio thread ----------------------------------------
+  // Calls the client's callback for rendering audio.
+  // Returns actual number of filled frames that callback returned. This length
+  // is passed to host at the end of the shared memory (i.e. buffer). In case of
+  // continuous stream host just ignores it and assumes buffer is always filled
+  // to its capacity.
+  size_t FireRenderCallback(int16* data);
 
   // DelegateSimpleThread::Delegate implementation.
   virtual void Run() OVERRIDE;
 
diff --git a/content/renderer/media/audio_renderer_impl.cc b/content/renderer/media/audio_renderer_impl.cc
index d946857..ca905a0 100644
--- a/content/renderer/media/audio_renderer_impl.cc
+++ b/content/renderer/media/audio_renderer_impl.cc
@@ -192,14 +192,14 @@ void AudioRendererImpl::DoSeek() {
   audio_device_->Pause(true);
 }
 
-void AudioRendererImpl::Render(const std::vector<float*>& audio_data,
-                               size_t number_of_frames,
-                               size_t audio_delay_milliseconds) {
+size_t AudioRendererImpl::Render(const std::vector<float*>& audio_data,
+                                 size_t number_of_frames,
+                                 size_t audio_delay_milliseconds) {
   if (stopped_ || GetPlaybackRate() == 0.0f) {
     // Output silence if stopped.
     for (size_t i = 0; i < audio_data.size(); ++i)
       memset(audio_data[i], 0, sizeof(float) * number_of_frames);
-    return;
+    return 0;
   }
 
   // Adjust the playback delay.
@@ -225,6 +225,7 @@ void AudioRendererImpl::Render(const std::vector<float*>& audio_data,
                                request_delay,
                                time_now >= earliest_end_time_);
   DCHECK_LE(filled, buf_size);
+  UpdateEarliestEndTime(filled, request_delay, time_now);
 
   uint32 filled_frames = filled / bytes_per_frame;
 
@@ -241,7 +242,10 @@ void AudioRendererImpl::Render(const std::vector<float*>& audio_data,
     // If FillBuffer() didn't give us enough data then zero out the remainder.
     if (filled_frames < number_of_frames) {
       int frames_to_zero = number_of_frames - filled_frames;
-      memset(audio_data[channel_index], 0, sizeof(float) * frames_to_zero);
+      memset(audio_data[channel_index] + filled_frames,
+             0,
+             sizeof(float) * frames_to_zero);
     }
   }
+  return filled_frames;
 }
diff --git a/content/renderer/media/audio_renderer_impl.h b/content/renderer/media/audio_renderer_impl.h
index 1c8ec6a..f555b11 100644
--- a/content/renderer/media/audio_renderer_impl.h
+++ b/content/renderer/media/audio_renderer_impl.h
@@ -75,9 +75,9 @@ class CONTENT_EXPORT AudioRendererImpl
   void DoSeek();
 
   // AudioDevice::RenderCallback implementation.
-  virtual void Render(const std::vector<float*>& audio_data,
-                      size_t number_of_frames,
-                      size_t audio_delay_milliseconds) OVERRIDE;
+  virtual size_t Render(const std::vector<float*>& audio_data,
+                        size_t number_of_frames,
+                        size_t audio_delay_milliseconds) OVERRIDE;
 
   // Accessors used by tests.
   base::Time earliest_end_time() const {
diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc
index 57a1260..300439d 100644
--- a/content/renderer/media/webrtc_audio_device_impl.cc
+++ b/content/renderer/media/webrtc_audio_device_impl.cc
@@ -60,7 +60,7 @@ int32_t WebRtcAudioDeviceImpl::Release() {
   return ret;
 }
 
-void WebRtcAudioDeviceImpl::Render(
+size_t WebRtcAudioDeviceImpl::Render(
     const std::vector<float*>& audio_data,
     size_t number_of_frames,
     size_t audio_delay_milliseconds) {
@@ -115,6 +115,7 @@ void WebRtcAudioDeviceImpl::Render(
                            bytes_per_sample_,
                            number_of_frames);
   }
+  return number_of_frames;
 }
 
 void WebRtcAudioDeviceImpl::Capture(
diff --git a/content/renderer/media/webrtc_audio_device_impl.h b/content/renderer/media/webrtc_audio_device_impl.h
index cad0bf0..28f4ae4 100644
--- a/content/renderer/media/webrtc_audio_device_impl.h
+++ b/content/renderer/media/webrtc_audio_device_impl.h
@@ -6,6 +6,7 @@
 #define CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_DEVICE_IMPL_H_
 #pragma once
 
+#include <string>
 #include <vector>
 
 #include "base/basictypes.h"
@@ -111,9 +112,9 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
   static bool ImplementsThreadSafeReferenceCounting() { return true; }
 
   // AudioDevice::RenderCallback implementation.
-  virtual void Render(const std::vector<float*>& audio_data,
-                      size_t number_of_frames,
-                      size_t audio_delay_milliseconds) OVERRIDE;
+  virtual size_t Render(const std::vector<float*>& audio_data,
+                        size_t number_of_frames,
+                        size_t audio_delay_milliseconds) OVERRIDE;
 
   // AudioInputDevice::CaptureCallback implementation.
   virtual void Capture(const std::vector<float*>& audio_data,