author     rtoy@chromium.org <rtoy@chromium.org>   2015-05-22 22:16:44 +0000
committer  rtoy@chromium.org <rtoy@chromium.org>   2015-05-22 22:16:44 +0000
commit     e585a8d4a339b2e94d81d8f2296bc4e9fdf0cd37 (patch)
tree       25ebe282f3727bfc39fa505a95ea5e93ef9909bd
parent     24382566c0e87bf03a44b323209d4d31ab8fdef5 (diff)
Access current frame counter carefully and remove m_cachedSampleFrame.
m_cachedSampleFrame was added in https://codereview.chromium.org/773273002 to
handle a race condition when accessing the current frame counter, which is
updated on the audio thread. Use acquireLoad and releaseStore instead so that
no race can occur while the counter is being updated.
BUG=488211
TEST=Ran the TSAN test case from issue 431874 without problems.
Review URL: https://codereview.chromium.org/1149913002
git-svn-id: svn://svn.chromium.org/blink/trunk@195816 bbb929c8-8fbe-4397-9dbb-9b2b20218538
5 files changed, 16 insertions, 43 deletions
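The core of the fix is an acquire/release pairing on m_currentSampleFrame: the audio thread is the sole writer and publishes the advanced counter with a release store, while readers such as the main thread use an acquire load. Below is a minimal standalone sketch of that pattern, not Blink code: advanceSampleFrame and the thread driver in main() are illustrative names, and std::atomic with explicit memory orders stands in for WTF's acquireLoad()/releaseStore(), which are assumed to provide the same acquire/release semantics.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>

static std::atomic<size_t> currentSampleFrame{0}; // stands in for m_currentSampleFrame

// Audio thread: sole writer. The relaxed read of the counter is safe because
// no other thread writes it; the release store publishes the new value.
static void advanceSampleFrame(size_t numberOfFrames)
{
    size_t newSampleFrame =
        currentSampleFrame.load(std::memory_order_relaxed) + numberOfFrames;
    currentSampleFrame.store(newSampleFrame, std::memory_order_release);
}

// Main thread: the acquire load pairs with the release store above, so the
// read is no longer a data race (what TSan was flagging).
static double currentTime(double sampleRate)
{
    return currentSampleFrame.load(std::memory_order_acquire) / sampleRate;
}

int main()
{
    std::thread audioThread([] {
        for (int i = 0; i < 1000; ++i)
            advanceSampleFrame(128); // one render quantum per iteration
    });
    std::thread readerThread([] {
        for (int i = 0; i < 1000; ++i)
            currentTime(44100.0);
    });
    audioThread.join();
    readerThread.join();
    std::printf("final sample frame: %zu\n", currentSampleFrame.load());
    return 0;
}

Note that release/acquire is sufficient here because there is a single writer: no read-modify-write atomicity is needed, which is why the diff keeps a plain addition in render() and only makes the store and the load atomic.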
diff --git a/third_party/WebKit/LayoutTests/webaudio/scriptprocessornode.html b/third_party/WebKit/LayoutTests/webaudio/scriptprocessornode.html
index c65ddc6..4883400 100644
--- a/third_party/WebKit/LayoutTests/webaudio/scriptprocessornode.html
+++ b/third_party/WebKit/LayoutTests/webaudio/scriptprocessornode.html
@@ -46,18 +46,10 @@ function processAudioData(event) {
     var allowedTimeGap = 0.0000001;

     // There may be a little time gap which is from different thread operation
-    // between currentTime when main thread fires onaudioprocess() and currenTime when read in JS
-    // since currentTime is continuously increasing on audio thread. And caching of the currentTime
-    // can cause playbackTime to be one block behind. So allow for that.
+    // between currentTime when main thread fires onaudioprocess() and currentTime when read in JS
+    // since currentTime is continuously increasing on audio thread.

-    var closeEnough = Math.abs(playbackTime - expectedTime) <= allowedTimeGap;
-    closeEnough = closeEnough || (Math.abs(playbackTime - (expectedTime - 128 / context.sampleRate)) <= allowedTimeGap);
-
-    if (!closeEnough) {
-        testFailed("playbackTime should be within " + allowedTimeGap + " of either "
-            + expectedTime + " or " + (expectedTime - 128 / context.sampleRate)
-            + ". Was " + playbackTime);
-    }
+    shouldBeCloseTo("playbackTime", expectedTime, allowedTimeGap, true);

     buffer = event.outputBuffer;
     if (buffer.numberOfChannels != outputChannels)
diff --git a/third_party/WebKit/Source/modules/webaudio/AudioContext.cpp b/third_party/WebKit/Source/modules/webaudio/AudioContext.cpp
index 845d729f..a6e0aeb 100644
--- a/third_party/WebKit/Source/modules/webaudio/AudioContext.cpp
+++ b/third_party/WebKit/Source/modules/webaudio/AudioContext.cpp
@@ -109,7 +109,6 @@ AudioContext::AudioContext(Document* document)
     , m_deferredTaskHandler(DeferredTaskHandler::create())
     , m_isOfflineContext(false)
     , m_contextState(Suspended)
-    , m_cachedSampleFrame(0)
 {
     m_didInitializeContextGraphMutex = true;
     m_destinationNode = DefaultAudioDestinationNode::create(this);
@@ -130,7 +129,6 @@ AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t
     , m_deferredTaskHandler(DeferredTaskHandler::create())
     , m_isOfflineContext(true)
     , m_contextState(Suspended)
-    , m_cachedSampleFrame(0)
 {
     m_didInitializeContextGraphMutex = true;
     // Create a new destination for offline rendering.
@@ -679,22 +677,6 @@ PeriodicWave* AudioContext::createPeriodicWave(DOMFloat32Array* real, DOMFloat32
     return PeriodicWave::create(sampleRate(), real, imag);
 }

-size_t AudioContext::currentSampleFrame() const
-{
-    if (isAudioThread())
-        return m_destinationNode ? m_destinationNode->audioDestinationHandler().currentSampleFrame() : 0;
-
-    return m_cachedSampleFrame;
-}
-
-double AudioContext::currentTime() const
-{
-    if (isAudioThread())
-        return m_destinationNode ? m_destinationNode->audioDestinationHandler().currentTime() : 0;
-
-    return m_cachedSampleFrame / static_cast<double>(sampleRate());
-}
-
 String AudioContext::state() const
 {
     // These strings had better match the strings for AudioContextState in AudioContext.idl.
@@ -880,9 +862,6 @@ void AudioContext::handlePreRenderTasks()
         // Check to see if source nodes can be stopped because the end time has passed.
         handleStoppableSourceNodes();

-        // Update the cached sample frame value.
-        m_cachedSampleFrame = currentSampleFrame();
-
         unlock();
     }
 }
diff --git a/third_party/WebKit/Source/modules/webaudio/AudioContext.h b/third_party/WebKit/Source/modules/webaudio/AudioContext.h
index 9ea7dd2..bfb3305 100644
--- a/third_party/WebKit/Source/modules/webaudio/AudioContext.h
+++ b/third_party/WebKit/Source/modules/webaudio/AudioContext.h
@@ -108,12 +108,15 @@ public:
     AudioDestinationNode* destination() { return m_destinationNode.get(); }

-    // currentSampleFrame() and currentTime() can be called from both the main
-    // thread and the audio thread. Note that, however, they return the cached
-    // value instead of actual current ones when they are accessed from the main
-    // thread. See: crbug.com/431874
-    size_t currentSampleFrame() const;
-    double currentTime() const;
+    size_t currentSampleFrame() const
+    {
+        return m_destinationNode ? m_destinationNode->audioDestinationHandler().currentSampleFrame() : 0;
+    }
+
+    double currentTime() const
+    {
+        return m_destinationNode ? m_destinationNode->audioDestinationHandler().currentTime() : 0;
+    }

     float sampleRate() const { return m_destinationNode ? m_destinationNode->handler().sampleRate() : 0; }
@@ -311,9 +314,6 @@ private:
     // The Promise that is returned by close();
     RefPtrWillBeMember<ScriptPromiseResolver> m_closeResolver;

-    // Follows the destination's currentSampleFrame, but might be slightly behind due to locking.
-    size_t m_cachedSampleFrame;
-
     // Tries to handle AudioBufferSourceNodes that were started but became disconnected or was never
     // connected. Because these never get pulled anymore, they will stay around forever. So if we
     // can, try to stop them so they can be collected.
diff --git a/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.cpp b/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.cpp
index 538a366..920bb6c 100644
--- a/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.cpp
+++ b/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.cpp
@@ -31,6 +31,7 @@
 #include "modules/webaudio/AudioNodeOutput.h"
 #include "platform/audio/AudioUtilities.h"
 #include "platform/audio/DenormalDisabler.h"
+#include "wtf/Atomics.h"

 namespace blink {

@@ -90,7 +91,8 @@ void AudioDestinationHandler::render(AudioBus* sourceBus, AudioBus* destinationB
     context()->handlePostRenderTasks();

     // Advance current sample-frame.
-    m_currentSampleFrame += numberOfFrames;
+    size_t newSampleFrame = m_currentSampleFrame + numberOfFrames;
+    releaseStore(&m_currentSampleFrame, newSampleFrame);
 }

 // ----------------------------------------------------------------
diff --git a/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.h b/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.h
index bba01f5..142ee08 100644
--- a/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.h
+++ b/third_party/WebKit/Source/modules/webaudio/AudioDestinationNode.h
@@ -48,7 +48,7 @@ public:
     // It will optionally give us local/live audio input in sourceBus (if it's not 0).
     virtual void render(AudioBus* sourceBus, AudioBus* destinationBus, size_t numberOfFrames) override final;

-    size_t currentSampleFrame() const { return m_currentSampleFrame; }
+    size_t currentSampleFrame() const { return acquireLoad(&m_currentSampleFrame); }
     double currentTime() const { return currentSampleFrame() / static_cast<double>(sampleRate()); }

     virtual unsigned long maxChannelCount() const { return 0; }