author     vrk@chromium.org <vrk@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-04-06 03:17:44 +0000
committer  vrk@chromium.org <vrk@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-04-06 03:17:44 +0000
commit     e4fc09e874d7a7fc87dc1565452d32985949a3cf (patch)
tree       8a8a5f596294a3db7c6aa3998b8562a4b24e60e9
parent     f2ebbf06167ad4ff8cb23109b3652c8c4b7ff5f7 (diff)
Merge AudioRendererImpl and AudioRendererBase; add NullAudioSink
This CL removes AudioRendererImpl and replaces it with AudioRendererBase.
NullAudioRenderer is also removed and replaced with NullAudioSink. Also, a
subtle bug is fixed in AudioRendererBase to allow for smooth video playback
when running Chrome with the --disable-audio flag.

BUG=119549,116645
TEST=media_unittests, playing video on Chrome/content_shell with and without
--disable-audio flag should look identical

Review URL: http://codereview.chromium.org/9826023

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@131089 0039d316-1c4b-4281-b951-d872f2087c98
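A minimal sketch (editorial, not code from this CL) of how the merged renderer is wired up after this change, based only on the interfaces visible in the diff below. CreateAudioRenderer and audio_disabled are hypothetical names; the real hookup lives in the render_view_impl.cc and webmediaplayer_impl.cc changes further down.

    #include "media/audio/null_audio_sink.h"
    #include "media/base/audio_renderer_sink.h"
    #include "media/filters/audio_renderer_base.h"

    media::AudioRendererBase* CreateAudioRenderer(
        media::AudioRendererSink* hardware_sink, bool audio_disabled) {
      // With --disable-audio there is no usable hardware sink; NullAudioSink
      // throws the rendered PCM data away at roughly real-time speed so the
      // media clock still advances and video playback stays smooth.
      media::AudioRendererSink* sink =
          audio_disabled ? new media::NullAudioSink() : hardware_sink;

      // AudioRendererBase (which absorbed AudioRendererImpl in this CL) takes
      // the sink directly; it calls sink->Initialize() and sink->Start() during
      // its own Initialize() and drives the sink via Play()/Pause()/
      // SetPlaybackRate().
      return new media::AudioRendererBase(sink);
    }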
---
-rw-r--r--  content/content_renderer.gypi                           |    2
-rw-r--r--  content/content_tests.gypi                              |    1
-rw-r--r--  content/renderer/media/audio_device.cc                  |    9
-rw-r--r--  content/renderer/media/audio_device.h                   |   20
-rw-r--r--  content/renderer/media/audio_hardware.cc                |   30
-rw-r--r--  content/renderer/media/audio_hardware.h                 |    4
-rw-r--r--  content/renderer/media/audio_input_device.h             |    6
-rw-r--r--  content/renderer/media/audio_renderer_impl.cc           |  229
-rw-r--r--  content/renderer/media/audio_renderer_impl.h            |  137
-rw-r--r--  content/renderer/media/audio_renderer_impl_unittest.cc  |  199
-rw-r--r--  content/renderer/media/render_audiosourceprovider.cc    |    6
-rw-r--r--  content/renderer/media/render_audiosourceprovider.h     |    1
-rw-r--r--  content/renderer/media/renderer_webaudiodevice_impl.cc  |    6
-rw-r--r--  content/renderer/media/renderer_webaudiodevice_impl.h   |    6
-rw-r--r--  content/renderer/media/webrtc_audio_device_impl.cc      |   17
-rw-r--r--  content/renderer/media/webrtc_audio_device_impl.h       |   14
-rw-r--r--  content/renderer/render_view_impl.cc                    |    6
-rw-r--r--  media/audio/audio_util.cc                               |   32
-rw-r--r--  media/audio/audio_util.h                                |    4
-rw-r--r--  media/audio/null_audio_sink.cc                          |  110
-rw-r--r--  media/audio/null_audio_sink.h                           |   65
-rw-r--r--  media/base/audio_renderer_sink.h                        |   10
-rw-r--r--  media/base/pipeline.cc                                  |    2
-rw-r--r--  media/filters/audio_renderer_base.cc                    |  302
-rw-r--r--  media/filters/audio_renderer_base.h                     |  125
-rw-r--r--  media/filters/audio_renderer_base_unittest.cc           |   81
-rw-r--r--  media/filters/null_audio_renderer.cc                    |   95
-rw-r--r--  media/filters/null_audio_renderer.h                     |   65
-rw-r--r--  media/filters/pipeline_integration_test_base.cc         |    5
-rw-r--r--  media/media.gyp                                         |    4
-rw-r--r--  media/tools/player_wtl/movie.cc                         |    6
-rw-r--r--  media/tools/player_x11/player_x11.cc                    |    6
-rw-r--r--  webkit/media/webmediaplayer_impl.cc                     |    6
33 files changed, 633 insertions(+), 978 deletions(-)
diff --git a/content/content_renderer.gypi b/content/content_renderer.gypi
index 8080d3b..e3ca2ea 100644
--- a/content/content_renderer.gypi
+++ b/content/content_renderer.gypi
@@ -84,8 +84,6 @@
'renderer/media/audio_input_message_filter.h',
'renderer/media/audio_message_filter.cc',
'renderer/media/audio_message_filter.h',
- 'renderer/media/audio_renderer_impl.cc',
- 'renderer/media/audio_renderer_impl.h',
'renderer/media/capture_video_decoder.cc',
'renderer/media/capture_video_decoder.h',
'renderer/media/media_stream_center.h',
diff --git a/content/content_tests.gypi b/content/content_tests.gypi
index 1befa63..b1d6dc7 100644
--- a/content/content_tests.gypi
+++ b/content/content_tests.gypi
@@ -283,7 +283,6 @@
'renderer/active_notification_tracker_unittest.cc',
'renderer/gpu/input_event_filter_unittest.cc',
'renderer/media/audio_message_filter_unittest.cc',
- 'renderer/media/audio_renderer_impl_unittest.cc',
'renderer/media/capture_video_decoder_unittest.cc',
'renderer/media/video_capture_impl_unittest.cc',
'renderer/media/video_capture_message_filter_unittest.cc',
diff --git a/content/renderer/media/audio_device.cc b/content/renderer/media/audio_device.cc
index 7a07ab5..b87f300 100644
--- a/content/renderer/media/audio_device.cc
+++ b/content/renderer/media/audio_device.cc
@@ -69,10 +69,6 @@ void AudioDevice::Initialize(const media::AudioParameters& params,
CHECK(!callback_); // Calling Initialize() twice?
audio_parameters_ = params;
- audio_parameters_.Reset(
- params.format(),
- params.channel_layout(), params.sample_rate(), params.bits_per_sample(),
- params.frames_per_buffer());
callback_ = callback;
}
@@ -85,7 +81,8 @@ AudioDevice::~AudioDevice() {
void AudioDevice::Start() {
DCHECK(callback_) << "Initialize hasn't been called";
message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioDevice::InitializeOnIOThread, this, audio_parameters_));
+ base::Bind(&AudioDevice::CreateStreamOnIOThread, this,
+ audio_parameters_));
}
void AudioDevice::Stop() {
@@ -127,7 +124,7 @@ void AudioDevice::GetVolume(double* volume) {
*volume = volume_;
}
-void AudioDevice::InitializeOnIOThread(const media::AudioParameters& params) {
+void AudioDevice::CreateStreamOnIOThread(const media::AudioParameters& params) {
DCHECK(message_loop()->BelongsToCurrentThread());
// Make sure we don't create the stream more than once.
DCHECK_EQ(0, stream_id_);
diff --git a/content/renderer/media/audio_device.h b/content/renderer/media/audio_device.h
index 9410ce7..e63da0a 100644
--- a/content/renderer/media/audio_device.h
+++ b/content/renderer/media/audio_device.h
@@ -23,7 +23,7 @@
//
// Task [IO thread] IPC [IO thread]
//
-// Start -> InitializeOnIOThread ------> AudioHostMsg_CreateStream -------->
+// Start -> CreateStreamOnIOThread -----> AudioHostMsg_CreateStream ------>
// <- OnStreamCreated <- AudioMsg_NotifyStreamCreated <-
// ---> PlayOnIOThread -----------> AudioHostMsg_PlayStream -------->
//
@@ -96,27 +96,11 @@ class CONTENT_EXPORT AudioDevice
virtual void Initialize(const media::AudioParameters& params,
RenderCallback* callback) OVERRIDE;
- // Starts audio playback.
virtual void Start() OVERRIDE;
-
- // Stops audio playback.
virtual void Stop() OVERRIDE;
-
- // Resumes playback if currently paused.
virtual void Play() OVERRIDE;
-
- // Pauses playback.
- // If |flush| is true then any pending audio that is in the pipeline
- // (has not yet reached the hardware) will be discarded. In this case,
- // when Play() is later called, no previous pending audio will be
- // rendered.
virtual void Pause(bool flush) OVERRIDE;
-
- // Sets the playback volume, with range [0.0, 1.0] inclusive.
- // Returns |true| on success.
virtual bool SetVolume(double volume) OVERRIDE;
-
- // Gets the playback volume, with range [0.0, 1.0] inclusive.
virtual void GetVolume(double* volume) OVERRIDE;
// Methods called on IO thread ----------------------------------------------
@@ -136,7 +120,7 @@ class CONTENT_EXPORT AudioDevice
// The following methods are tasks posted on the IO thread that need to
// be executed on that thread. They interact with AudioMessageFilter and
// send IPC messages on that thread.
- void InitializeOnIOThread(const media::AudioParameters& params);
+ void CreateStreamOnIOThread(const media::AudioParameters& params);
void PlayOnIOThread();
void PauseOnIOThread(bool flush);
void ShutDownOnIOThread();
diff --git a/content/renderer/media/audio_hardware.cc b/content/renderer/media/audio_hardware.cc
index d753a54..a7322a0 100644
--- a/content/renderer/media/audio_hardware.cc
+++ b/content/renderer/media/audio_hardware.cc
@@ -48,36 +48,6 @@ size_t GetOutputBufferSize() {
return output_buffer_size;
}
-size_t GetHighLatencyOutputBufferSize(int sample_rate) {
- // TODO(vrk/crogers): The buffer sizes that this function computes is probably
- // overly conservative. However, reducing the buffer size to 2048-8192 bytes
- // caused crbug.com/108396. This computation should be revisited while making
- // sure crbug.com/108396 doesn't happen again.
-
- // The minimum number of samples in a hardware packet.
- // This value is selected so that we can handle down to 5khz sample rate.
- static const size_t kMinSamplesPerHardwarePacket = 1024;
-
- // The maximum number of samples in a hardware packet.
- // This value is selected so that we can handle up to 192khz sample rate.
- static const size_t kMaxSamplesPerHardwarePacket = 64 * 1024;
-
- // This constant governs the hardware audio buffer size, this value should be
- // chosen carefully.
- // This value is selected so that we have 8192 samples for 48khz streams.
- static const size_t kMillisecondsPerHardwarePacket = 170;
-
- // Select the number of samples that can provide at least
- // |kMillisecondsPerHardwarePacket| worth of audio data.
- size_t samples = kMinSamplesPerHardwarePacket;
- while (samples <= kMaxSamplesPerHardwarePacket &&
- samples * base::Time::kMillisecondsPerSecond <
- sample_rate * kMillisecondsPerHardwarePacket) {
- samples *= 2;
- }
- return samples;
-}
-
ChannelLayout GetInputChannelLayout() {
DCHECK(RenderThreadImpl::current() != NULL);
diff --git a/content/renderer/media/audio_hardware.h b/content/renderer/media/audio_hardware.h
index 30c4233..4d00246 100644
--- a/content/renderer/media/audio_hardware.h
+++ b/content/renderer/media/audio_hardware.h
@@ -28,10 +28,6 @@ CONTENT_EXPORT int GetInputSampleRate();
// Must be used in conjunction with AUDIO_PCM_LOW_LATENCY.
CONTENT_EXPORT size_t GetOutputBufferSize();
-// Computes a buffer size based on the given |sample_rate|. Must be used in
-// conjunction with AUDIO_PCM_LINEAR.
-CONTENT_EXPORT size_t GetHighLatencyOutputBufferSize(int sample_rate);
-
// Fetch the audio channel layout for the default input device.
// Must be called from RenderThreadImpl::current().
CONTENT_EXPORT ChannelLayout GetInputChannelLayout();
diff --git a/content/renderer/media/audio_input_device.h b/content/renderer/media/audio_input_device.h
index ac68b96..787dfad 100644
--- a/content/renderer/media/audio_input_device.h
+++ b/content/renderer/media/audio_input_device.h
@@ -95,8 +95,8 @@ class CONTENT_EXPORT AudioInputDevice
class CONTENT_EXPORT CaptureCallback {
public:
virtual void Capture(const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds,
+ int number_of_frames,
+ int audio_delay_milliseconds,
double volume) = 0;
virtual void OnCaptureError() = 0;
protected:
@@ -148,7 +148,7 @@ class CONTENT_EXPORT AudioInputDevice
return audio_parameters_.sample_rate();
}
- size_t buffer_size() const {
+ int buffer_size() const {
return audio_parameters_.frames_per_buffer();
}
diff --git a/content/renderer/media/audio_renderer_impl.cc b/content/renderer/media/audio_renderer_impl.cc
deleted file mode 100644
index 81d1cc6..0000000
--- a/content/renderer/media/audio_renderer_impl.cc
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/audio_renderer_impl.h"
-
-#include <math.h>
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "content/common/child_process.h"
-#include "content/common/media/audio_messages.h"
-#include "content/renderer/media/audio_hardware.h"
-#include "content/renderer/render_thread_impl.h"
-#include "media/audio/audio_buffers_state.h"
-#include "media/audio/audio_util.h"
-#include "media/base/filter_host.h"
-
-AudioRendererImpl::AudioRendererImpl(media::AudioRendererSink* sink)
- : AudioRendererBase(),
- bytes_per_second_(0),
- stopped_(false),
- sink_(sink),
- is_initialized_(false) {
-}
-
-AudioRendererImpl::~AudioRendererImpl() {
-}
-
-base::TimeDelta AudioRendererImpl::ConvertToDuration(int bytes) {
- if (bytes_per_second_) {
- return base::TimeDelta::FromMicroseconds(
- base::Time::kMicrosecondsPerSecond * bytes / bytes_per_second_);
- }
- return base::TimeDelta();
-}
-
-void AudioRendererImpl::UpdateEarliestEndTime(int bytes_filled,
- base::TimeDelta request_delay,
- base::Time time_now) {
- if (bytes_filled != 0) {
- base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled);
- float playback_rate = GetPlaybackRate();
- if (playback_rate != 1.0f) {
- predicted_play_time = base::TimeDelta::FromMicroseconds(
- static_cast<int64>(ceil(predicted_play_time.InMicroseconds() *
- playback_rate)));
- }
- earliest_end_time_ =
- std::max(earliest_end_time_,
- time_now + request_delay + predicted_play_time);
- }
-}
-
-bool AudioRendererImpl::OnInitialize(int bits_per_channel,
- ChannelLayout channel_layout,
- int sample_rate) {
- // We use the AUDIO_PCM_LINEAR flag because AUDIO_PCM_LOW_LATENCY
- // does not currently support all the sample-rates that we require.
- // Please see: http://code.google.com/p/chromium/issues/detail?id=103627
- // for more details.
- audio_parameters_.Reset(
- media::AudioParameters::AUDIO_PCM_LINEAR,
- channel_layout, sample_rate, bits_per_channel,
- audio_hardware::GetHighLatencyOutputBufferSize(sample_rate));
-
- bytes_per_second_ = audio_parameters_.GetBytesPerSecond();
-
- DCHECK(sink_.get());
-
- if (!is_initialized_) {
- sink_->Initialize(audio_parameters_, this);
-
- sink_->Start();
- is_initialized_ = true;
- return true;
- }
-
- return false;
-}
-
-void AudioRendererImpl::OnStop() {
- if (stopped_)
- return;
-
- DCHECK(sink_.get());
- sink_->Stop();
-
- stopped_ = true;
-}
-
-void AudioRendererImpl::SetPlaybackRate(float rate) {
- DCHECK_LE(0.0f, rate);
-
- // Handle the case where we stopped due to IO message loop dying.
- if (stopped_) {
- AudioRendererBase::SetPlaybackRate(rate);
- return;
- }
-
- // We have two cases here:
- // Play: GetPlaybackRate() == 0.0 && rate != 0.0
- // Pause: GetPlaybackRate() != 0.0 && rate == 0.0
- if (GetPlaybackRate() == 0.0f && rate != 0.0f) {
- DoPlay();
- } else if (GetPlaybackRate() != 0.0f && rate == 0.0f) {
- // Pause is easy, we can always pause.
- DoPause();
- }
- AudioRendererBase::SetPlaybackRate(rate);
-}
-
-void AudioRendererImpl::Pause(const base::Closure& callback) {
- AudioRendererBase::Pause(callback);
- if (stopped_)
- return;
-
- DoPause();
-}
-
-void AudioRendererImpl::Seek(base::TimeDelta time,
- const media::PipelineStatusCB& cb) {
- AudioRendererBase::Seek(time, cb);
- if (stopped_)
- return;
-
- DoSeek();
-}
-
-void AudioRendererImpl::Play(const base::Closure& callback) {
- AudioRendererBase::Play(callback);
- if (stopped_)
- return;
-
- if (GetPlaybackRate() != 0.0f) {
- DoPlay();
- } else {
- DoPause();
- }
-}
-
-void AudioRendererImpl::SetVolume(float volume) {
- if (stopped_)
- return;
- DCHECK(sink_.get());
- sink_->SetVolume(volume);
-}
-
-void AudioRendererImpl::DoPlay() {
- earliest_end_time_ = base::Time::Now();
- DCHECK(sink_.get());
- sink_->Play();
-}
-
-void AudioRendererImpl::DoPause() {
- DCHECK(sink_.get());
- sink_->Pause(false);
-}
-
-void AudioRendererImpl::DoSeek() {
- earliest_end_time_ = base::Time::Now();
-
- // Pause and flush the stream when we seek to a new location.
- DCHECK(sink_.get());
- sink_->Pause(true);
-}
-
-size_t AudioRendererImpl::Render(const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds) {
- if (stopped_ || GetPlaybackRate() == 0.0f) {
- // Output silence if stopped.
- for (size_t i = 0; i < audio_data.size(); ++i)
- memset(audio_data[i], 0, sizeof(float) * number_of_frames);
- return 0;
- }
-
- // Adjust the playback delay.
- base::TimeDelta request_delay =
- base::TimeDelta::FromMilliseconds(audio_delay_milliseconds);
-
- // Finally we need to adjust the delay according to playback rate.
- if (GetPlaybackRate() != 1.0f) {
- request_delay = base::TimeDelta::FromMicroseconds(
- static_cast<int64>(ceil(request_delay.InMicroseconds() *
- GetPlaybackRate())));
- }
-
- int bytes_per_frame = audio_parameters_.GetBytesPerFrame();
-
- const size_t buf_size = number_of_frames * bytes_per_frame;
- scoped_array<uint8> buf(new uint8[buf_size]);
-
- uint32 frames_filled = FillBuffer(buf.get(), number_of_frames, request_delay);
- uint32 bytes_filled = frames_filled * bytes_per_frame;
- DCHECK_LE(bytes_filled, buf_size);
- UpdateEarliestEndTime(bytes_filled, request_delay, base::Time::Now());
-
- // Deinterleave each audio channel.
- int channels = audio_data.size();
- for (int channel_index = 0; channel_index < channels; ++channel_index) {
- media::DeinterleaveAudioChannel(buf.get(),
- audio_data[channel_index],
- channels,
- channel_index,
- bytes_per_frame / channels,
- frames_filled);
-
- // If FillBuffer() didn't give us enough data then zero out the remainder.
- if (frames_filled < number_of_frames) {
- int frames_to_zero = number_of_frames - frames_filled;
- memset(audio_data[channel_index] + frames_filled,
- 0,
- sizeof(float) * frames_to_zero);
- }
- }
- return frames_filled;
-}
-
-void AudioRendererImpl::OnRenderError() {
- host()->DisableAudioRenderer();
-}
-
-void AudioRendererImpl::OnRenderEndOfStream() {
- // TODO(enal): schedule callback instead of polling.
- if (base::Time::Now() >= earliest_end_time_)
- SignalEndOfStream();
-}
diff --git a/content/renderer/media/audio_renderer_impl.h b/content/renderer/media/audio_renderer_impl.h
deleted file mode 100644
index 978f5a0..0000000
--- a/content/renderer/media/audio_renderer_impl.h
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Audio rendering unit utilizing AudioDevice.
-//
-// This class lives inside three threads during it's lifetime, namely:
-// 1. Render thread.
-// This object is created on the render thread.
-// 2. Pipeline thread
-// OnInitialize() is called here with the audio format.
-// Play/Pause/Seek also happens here.
-// 3. Audio thread created by the AudioDevice.
-// Render() is called here where audio data is decoded into raw PCM data.
-
-#ifndef CONTENT_RENDERER_MEDIA_AUDIO_RENDERER_IMPL_H_
-#define CONTENT_RENDERER_MEDIA_AUDIO_RENDERER_IMPL_H_
-#pragma once
-
-#include <vector>
-
-#include "base/gtest_prod_util.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/synchronization/lock.h"
-#include "content/renderer/media/audio_device.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_renderer_sink.h"
-#include "media/filters/audio_renderer_base.h"
-
-class AudioMessageFilter;
-
-class CONTENT_EXPORT AudioRendererImpl
- : public media::AudioRendererBase,
- NON_EXPORTED_BASE(public media::AudioRendererSink::RenderCallback) {
- public:
- // Methods called on Render thread ------------------------------------------
- // An AudioRendererSink is used as the destination for the rendered audio.
- explicit AudioRendererImpl(media::AudioRendererSink* sink);
- virtual ~AudioRendererImpl();
-
- // Methods called on pipeline thread ----------------------------------------
- // media::Filter implementation.
- virtual void SetPlaybackRate(float rate) OVERRIDE;
- virtual void Pause(const base::Closure& callback) OVERRIDE;
- virtual void Seek(base::TimeDelta time,
- const media::PipelineStatusCB& cb) OVERRIDE;
- virtual void Play(const base::Closure& callback) OVERRIDE;
-
- // media::AudioRenderer implementation.
- virtual void SetVolume(float volume) OVERRIDE;
-
- protected:
- // Methods called on pipeline thread ----------------------------------------
- // These methods are called from AudioRendererBase.
- virtual bool OnInitialize(int bits_per_channel,
- ChannelLayout channel_layout,
- int sample_rate) OVERRIDE;
- virtual void OnStop() OVERRIDE;
- virtual void OnRenderEndOfStream() OVERRIDE;
-
- private:
- // For access to constructor and IO thread methods.
- friend class AudioRendererImplTest;
- friend class DelegateCaller;
- FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest, Stop);
- FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest,
- DestroyedMessageLoop_ConsumeAudioSamples);
- FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest, UpdateEarliestEndTime);
- // Helper methods.
- // Convert number of bytes to duration of time using information about the
- // number of channels, sample rate and sample bits.
- base::TimeDelta ConvertToDuration(int bytes);
-
- // Methods called on pipeline thread ----------------------------------------
- void DoPlay();
- void DoPause();
- void DoSeek();
-
- // media::AudioRendererSink::RenderCallback implementation.
- virtual size_t Render(const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds) OVERRIDE;
- virtual void OnRenderError() OVERRIDE;
-
- // Accessors used by tests.
- base::Time earliest_end_time() const {
- return earliest_end_time_;
- }
-
- void set_earliest_end_time(const base::Time& earliest_end_time) {
- earliest_end_time_ = earliest_end_time;
- }
-
- uint32 bytes_per_second() const {
- return bytes_per_second_;
- }
-
- // Estimate earliest time when current buffer can stop playing.
- void UpdateEarliestEndTime(int bytes_filled,
- base::TimeDelta request_delay,
- base::Time time_now);
-
- // Used to calculate audio delay given bytes.
- uint32 bytes_per_second_;
-
- // A flag that indicates this filter is called to stop.
- bool stopped_;
-
- // The sink (destination) for rendered audio.
- scoped_refptr<media::AudioRendererSink> sink_;
-
- // Set to true when OnInitialize() is called.
- bool is_initialized_;
-
- // We're supposed to know amount of audio data OS or hardware buffered, but
- // that is not always so -- on my Linux box
- // AudioBuffersState::hardware_delay_bytes never reaches 0.
- //
- // As a result we cannot use it to find when stream ends. If we just ignore
- // buffered data we will notify host that stream ended before it is actually
- // did so, I've seen it done ~140ms too early when playing ~150ms file.
- //
- // Instead of trying to invent OS-specific solution for each and every OS we
- // are supporting, use simple workaround: every time we fill the buffer we
- // remember when it should stop playing, and do not assume that buffer is
- // empty till that time. Workaround is not bulletproof, as we don't exactly
- // know when that particular data would start playing, but it is much better
- // than nothing.
- base::Time earliest_end_time_;
-
- media::AudioParameters audio_parameters_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioRendererImpl);
-};
-
-#endif // CONTENT_RENDERER_MEDIA_AUDIO_RENDERER_IMPL_H_
diff --git a/content/renderer/media/audio_renderer_impl_unittest.cc b/content/renderer/media/audio_renderer_impl_unittest.cc
deleted file mode 100644
index a0bb00c..0000000
--- a/content/renderer/media/audio_renderer_impl_unittest.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/message_loop.h"
-#include "base/process_util.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/test/test_timeouts.h"
-#include "base/time.h"
-#include "content/common/child_process.h"
-#include "content/common/child_thread.h"
-#include "content/renderer/media/audio_renderer_impl.h"
-#include "content/renderer/mock_content_renderer_client.h"
-#include "content/renderer/render_process.h"
-#include "content/renderer/render_thread_impl.h"
-#include "ipc/ipc_channel.h"
-#include "media/base/data_buffer.h"
-#include "media/base/mock_callback.h"
-#include "media/base/mock_filter_host.h"
-#include "media/base/mock_filters.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::Return;
-
-namespace {
-// This class is a mock of the child process singleton which is needed
-// to be able to create a RenderThread object.
-class MockRenderProcess : public RenderProcess {
- public:
- MockRenderProcess() {}
- virtual ~MockRenderProcess() {}
-
- // RenderProcess implementation.
- virtual skia::PlatformCanvas* GetDrawingCanvas(TransportDIB** memory,
- const gfx::Rect& rect) { return NULL; }
- virtual void ReleaseTransportDIB(TransportDIB* memory) {}
- virtual bool UseInProcessPlugins() const { return false; }
- virtual void AddBindings(int bindings) {}
- virtual int GetEnabledBindings() const { return 0; }
- virtual bool HasInitializedMediaLibrary() const { return false; }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockRenderProcess);
-};
-}
-
-// This callback can be posted on the IO thread and will signal an event when
-// done. The caller can then wait for this signal to ensure that no
-// additional tasks remain in the task queue.
-void WaitCallback(base::WaitableEvent* event) {
- event->Signal();
-}
-
-// Class we would be testing.
-class TestAudioRendererImpl : public AudioRendererImpl {
- public:
- explicit TestAudioRendererImpl(media::AudioRendererSink* sink)
- : AudioRendererImpl(sink) {
- }
-};
-
-class AudioRendererImplTest
- : public ::testing::Test,
- public IPC::Channel::Listener {
- public:
- // IPC::Channel::Listener implementation.
- virtual bool OnMessageReceived(const IPC::Message& message) {
- NOTIMPLEMENTED();
- return true;
- }
-
- static const int kSize;
-
- AudioRendererImplTest() {}
- virtual ~AudioRendererImplTest() {}
-
- virtual void SetUp() {
- // This part sets up a RenderThread environment to ensure that
- // RenderThread::current() (<=> TLS pointer) is valid.
- // Main parts are inspired by the RenderViewFakeResourcesTest.
- // Note that, the IPC part is not utilized in this test.
- content::GetContentClient()->set_renderer(&mock_content_renderer_client_);
-
- static const char kThreadName[] = "RenderThread";
- channel_.reset(new IPC::Channel(kThreadName,
- IPC::Channel::MODE_SERVER, this));
- ASSERT_TRUE(channel_->Connect());
-
- mock_process_.reset(new MockRenderProcess);
- render_thread_ = new RenderThreadImpl(kThreadName);
-
- // Setup expectations for initialization.
- decoder_ = new media::MockAudioDecoder();
-
- EXPECT_CALL(*decoder_, bits_per_channel())
- .WillRepeatedly(Return(16));
- EXPECT_CALL(*decoder_, channel_layout())
- .WillRepeatedly(Return(CHANNEL_LAYOUT_MONO));
- EXPECT_CALL(*decoder_, samples_per_second())
- .WillRepeatedly(Return(44100));
-
- // Create a sink for the audio renderer.
- scoped_refptr<media::AudioRendererSink> default_sink =
- new AudioDevice();
-
- // Create and initialize the audio renderer.
- renderer_ = new TestAudioRendererImpl(default_sink.get());
- renderer_->Initialize(decoder_,
- media::NewExpectedStatusCB(media::PIPELINE_OK),
- NewUnderflowClosure(), NewTimeClosure());
-
- // We need an event to verify that all tasks are done before leaving
- // our tests.
- event_.reset(new base::WaitableEvent(false, false));
- }
-
- virtual void TearDown() {
- mock_process_.reset();
- }
-
- MOCK_METHOD0(OnUnderflow, void());
-
- base::Closure NewUnderflowClosure() {
- return base::Bind(&AudioRendererImplTest::OnUnderflow,
- base::Unretained(this));
- }
-
- void OnTimeCallback(
- base::TimeDelta current_time, base::TimeDelta max_time) {
- CHECK(current_time <= max_time);
- }
-
- media::AudioRenderer::TimeCB NewTimeClosure() {
- return base::Bind(&AudioRendererImplTest::OnTimeCallback,
- base::Unretained(this));
- }
-
- protected:
- // Posts a final task to the IO message loop and waits for completion.
- void WaitForIOThreadCompletion() {
- ChildProcess::current()->io_message_loop()->PostTask(
- FROM_HERE, base::Bind(&WaitCallback, base::Unretained(event_.get())));
- EXPECT_TRUE(event_->TimedWait(
- base::TimeDelta::FromMilliseconds(TestTimeouts::action_timeout_ms())));
- }
-
- MessageLoopForIO message_loop_;
- content::MockContentRendererClient mock_content_renderer_client_;
- scoped_ptr<IPC::Channel> channel_;
- RenderThreadImpl* render_thread_; // owned by mock_process_
- scoped_ptr<MockRenderProcess> mock_process_;
- scoped_refptr<media::MockAudioDecoder> decoder_;
- scoped_refptr<AudioRendererImpl> renderer_;
- scoped_ptr<base::WaitableEvent> event_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioRendererImplTest);
-};
-
-const int AudioRendererImplTest::kSize = 1024;
-
-TEST_F(AudioRendererImplTest, SetPlaybackRate) {
- // Execute SetPlaybackRate() codepath by toggling play/pause.
- // These methods will be called on the pipeline thread but calling from
- // here is fine for this test. Tasks will be posted internally on
- // the IO thread.
- renderer_->SetPlaybackRate(0.0f);
- renderer_->SetPlaybackRate(1.0f);
- renderer_->SetPlaybackRate(0.0f);
-
- renderer_->Stop(media::NewExpectedClosure());
- WaitForIOThreadCompletion();
-}
-
-TEST_F(AudioRendererImplTest, SetVolume) {
- // Execute SetVolume() codepath.
- // This method will be called on the pipeline thread IRL.
- // Tasks will be posted internally on the IO thread.
- renderer_->SetVolume(0.5f);
-
- renderer_->Stop(media::NewExpectedClosure());
- WaitForIOThreadCompletion();
-}
-
-TEST_F(AudioRendererImplTest, UpdateEarliestEndTime) {
- renderer_->SetPlaybackRate(1.0f);
- WaitForIOThreadCompletion();
- base::Time time_now = base::Time(); // Null time by default.
- renderer_->set_earliest_end_time(time_now);
- renderer_->UpdateEarliestEndTime(renderer_->bytes_per_second(),
- base::TimeDelta::FromMilliseconds(100),
- time_now);
- int time_delta = (renderer_->earliest_end_time() - time_now).InMilliseconds();
- EXPECT_EQ(1100, time_delta);
- renderer_->Stop(media::NewExpectedClosure());
- WaitForIOThreadCompletion();
-}
diff --git a/content/renderer/media/render_audiosourceprovider.cc b/content/renderer/media/render_audiosourceprovider.cc
index a627bfa..1144b52 100644
--- a/content/renderer/media/render_audiosourceprovider.cc
+++ b/content/renderer/media/render_audiosourceprovider.cc
@@ -57,6 +57,12 @@ void RenderAudioSourceProvider::Pause(bool flush) {
is_running_ = false;
}
+void RenderAudioSourceProvider::SetPlaybackRate(float rate) {
+ base::AutoLock auto_lock(sink_lock_);
+ if (!client_)
+ default_sink_->SetPlaybackRate(rate);
+}
+
bool RenderAudioSourceProvider::SetVolume(double volume) {
base::AutoLock auto_lock(sink_lock_);
if (!client_)
diff --git a/content/renderer/media/render_audiosourceprovider.h b/content/renderer/media/render_audiosourceprovider.h
index 6d842cb..f69c99d 100644
--- a/content/renderer/media/render_audiosourceprovider.h
+++ b/content/renderer/media/render_audiosourceprovider.h
@@ -57,6 +57,7 @@ class RenderAudioSourceProvider
virtual void Stop() OVERRIDE;
virtual void Play() OVERRIDE;
virtual void Pause(bool flush) OVERRIDE;
+ virtual void SetPlaybackRate(float rate) OVERRIDE;
virtual bool SetVolume(double volume) OVERRIDE;
virtual void GetVolume(double* volume) OVERRIDE;
virtual void Initialize(
diff --git a/content/renderer/media/renderer_webaudiodevice_impl.cc b/content/renderer/media/renderer_webaudiodevice_impl.cc
index 2a90144..07ea9c2 100644
--- a/content/renderer/media/renderer_webaudiodevice_impl.cc
+++ b/content/renderer/media/renderer_webaudiodevice_impl.cc
@@ -37,9 +37,9 @@ double RendererWebAudioDeviceImpl::sampleRate() {
return 44100.0;
}
-size_t RendererWebAudioDeviceImpl::Render(const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds) {
+int RendererWebAudioDeviceImpl::Render(const std::vector<float*>& audio_data,
+ int number_of_frames,
+ int audio_delay_milliseconds) {
// Make the client callback to get rendered audio.
DCHECK(client_callback_);
if (client_callback_) {
diff --git a/content/renderer/media/renderer_webaudiodevice_impl.h b/content/renderer/media/renderer_webaudiodevice_impl.h
index a70c0884..5b06242 100644
--- a/content/renderer/media/renderer_webaudiodevice_impl.h
+++ b/content/renderer/media/renderer_webaudiodevice_impl.h
@@ -30,9 +30,9 @@ class RendererWebAudioDeviceImpl
virtual double sampleRate();
// AudioDevice::RenderCallback implementation.
- virtual size_t Render(const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds) OVERRIDE;
+ virtual int Render(const std::vector<float*>& audio_data,
+ int number_of_frames,
+ int audio_delay_milliseconds) OVERRIDE;
virtual void OnRenderError() OVERRIDE;
private:
diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc
index 03953c3..a215eb4 100644
--- a/content/renderer/media/webrtc_audio_device_impl.cc
+++ b/content/renderer/media/webrtc_audio_device_impl.cc
@@ -72,10 +72,10 @@ int32_t WebRtcAudioDeviceImpl::Release() {
return ret;
}
-size_t WebRtcAudioDeviceImpl::Render(
+int WebRtcAudioDeviceImpl::Render(
const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds) {
+ int number_of_frames,
+ int audio_delay_milliseconds) {
DCHECK_LE(number_of_frames, output_buffer_size());
{
@@ -92,12 +92,12 @@ size_t WebRtcAudioDeviceImpl::Render(
// Even if the hardware runs at 44.1kHz, we use 44.0 internally.
samples_per_sec = 44000;
}
- uint32_t samples_per_10_msec = (samples_per_sec / 100);
+ int samples_per_10_msec = (samples_per_sec / 100);
const int bytes_per_10_msec =
channels * samples_per_10_msec * bytes_per_sample_;
uint32_t num_audio_samples = 0;
- size_t accumulated_audio_samples = 0;
+ int accumulated_audio_samples = 0;
char* audio_byte_buffer = reinterpret_cast<char*>(output_buffer_.get());
@@ -137,8 +137,8 @@ void WebRtcAudioDeviceImpl::OnRenderError() {
}
void WebRtcAudioDeviceImpl::Capture(const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds,
+ int number_of_frames,
+ int audio_delay_milliseconds,
double volume) {
DCHECK_LE(number_of_frames, input_buffer_size());
#if defined(OS_WIN) || defined(OS_MACOSX)
@@ -179,7 +179,8 @@ void WebRtcAudioDeviceImpl::Capture(const std::vector<float*>& audio_data,
const int samples_per_10_msec = (samples_per_sec / 100);
const int bytes_per_10_msec =
channels * samples_per_10_msec * bytes_per_sample_;
- size_t accumulated_audio_samples = 0;
+ int accumulated_audio_samples = 0;
+
char* audio_byte_buffer = reinterpret_cast<char*>(input_buffer_.get());
// Map internal volume range of [0.0, 1.0] into [0, 255] used by the
diff --git a/content/renderer/media/webrtc_audio_device_impl.h b/content/renderer/media/webrtc_audio_device_impl.h
index 8900ebf..5d948ca 100644
--- a/content/renderer/media/webrtc_audio_device_impl.h
+++ b/content/renderer/media/webrtc_audio_device_impl.h
@@ -222,15 +222,15 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
static bool ImplementsThreadSafeReferenceCounting() { return true; }
// AudioDevice::RenderCallback implementation.
- virtual size_t Render(const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds) OVERRIDE;
+ virtual int Render(const std::vector<float*>& audio_data,
+ int number_of_frames,
+ int audio_delay_milliseconds) OVERRIDE;
virtual void OnRenderError() OVERRIDE;
// AudioInputDevice::CaptureCallback implementation.
virtual void Capture(const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds,
+ int number_of_frames,
+ int audio_delay_milliseconds,
double volume) OVERRIDE;
virtual void OnCaptureError() OVERRIDE;
@@ -365,10 +365,10 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
void SetSessionId(int session_id);
// Accessors.
- size_t input_buffer_size() const {
+ int input_buffer_size() const {
return input_audio_parameters_.frames_per_buffer();
}
- size_t output_buffer_size() const {
+ int output_buffer_size() const {
return output_audio_parameters_.frames_per_buffer();
}
int input_channels() const {
diff --git a/content/renderer/render_view_impl.cc b/content/renderer/render_view_impl.cc
index 8103dc5..76e4781 100644
--- a/content/renderer/render_view_impl.cc
+++ b/content/renderer/render_view_impl.cc
@@ -64,7 +64,6 @@
#include "content/renderer/java/java_bridge_dispatcher.h"
#include "content/renderer/load_progress_tracker.h"
#include "content/renderer/media/audio_message_filter.h"
-#include "content/renderer/media/audio_renderer_impl.h"
#include "content/renderer/media/media_stream_dependency_factory.h"
#include "content/renderer/media/media_stream_dispatcher.h"
#include "content/renderer/media/media_stream_impl.h"
@@ -94,6 +93,7 @@
#include "media/base/filter_collection.h"
#include "media/base/media_switches.h"
#include "media/base/message_loop_factory.h"
+#include "media/filters/audio_renderer_base.h"
#include "media/filters/gpu_video_decoder.h"
#include "net/base/escape.h"
#include "net/base/net_errors.h"
@@ -2174,8 +2174,8 @@ WebMediaPlayer* RenderViewImpl::createMediaPlayer(
// Add the chrome specific audio renderer, using audio_source_provider
// as the sink.
- AudioRendererImpl* audio_renderer =
- new AudioRendererImpl(audio_source_provider);
+ media::AudioRendererBase* audio_renderer =
+ new media::AudioRendererBase(audio_source_provider);
collection->AddAudioRenderer(audio_renderer);
}
diff --git a/media/audio/audio_util.cc b/media/audio/audio_util.cc
index c15e845..01d71ee 100644
--- a/media/audio/audio_util.cc
+++ b/media/audio/audio_util.cc
@@ -427,6 +427,38 @@ ChannelLayout GetAudioInputHardwareChannelLayout(const std::string& device_id) {
#endif
}
+// Computes a buffer size based on the given |sample_rate|. Must be used in
+// conjunction with AUDIO_PCM_LINEAR.
+size_t GetHighLatencyOutputBufferSize(int sample_rate) {
+ // TODO(vrk/crogers): The buffer sizes that this function computes are probably
+ // overly conservative. However, reducing the buffer size to 2048-8192 bytes
+ // caused crbug.com/108396. This computation should be revisited while making
+ // sure crbug.com/108396 doesn't happen again.
+
+ // The minimum number of samples in a hardware packet.
+ // This value is selected so that we can handle down to 5khz sample rate.
+ static const size_t kMinSamplesPerHardwarePacket = 1024;
+
+ // The maximum number of samples in a hardware packet.
+ // This value is selected so that we can handle up to 192khz sample rate.
+ static const size_t kMaxSamplesPerHardwarePacket = 64 * 1024;
+
+ // This constant governs the hardware audio buffer size, this value should be
+ // chosen carefully.
+ // This value is selected so that we have 8192 samples for 48khz streams.
+ static const size_t kMillisecondsPerHardwarePacket = 170;
+
+ // Select the number of samples that can provide at least
+ // |kMillisecondsPerHardwarePacket| worth of audio data.
+ size_t samples = kMinSamplesPerHardwarePacket;
+ while (samples <= kMaxSamplesPerHardwarePacket &&
+ samples * base::Time::kMillisecondsPerSecond <
+ sample_rate * kMillisecondsPerHardwarePacket) {
+ samples *= 2;
+ }
+ return samples;
+}
+
// When transferring data in the shared memory, first word is size of data
// in bytes. Actual data starts immediately after it.
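As a worked check of the GetHighLatencyOutputBufferSize() loop added above (a sanity check, not part of the change): for a 48 kHz stream, samples doubles 1024 -> 2048 -> 4096 -> 8192, and the loop then stops because 8192 * 1000 >= 48000 * 170 = 8,160,000; the function therefore returns the 8192 samples promised in the comment, i.e. roughly 170 ms of audio.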
diff --git a/media/audio/audio_util.h b/media/audio/audio_util.h
index 2fccc1e..df5683f 100644
--- a/media/audio/audio_util.h
+++ b/media/audio/audio_util.h
@@ -107,6 +107,10 @@ MEDIA_EXPORT size_t GetAudioHardwareBufferSize();
MEDIA_EXPORT ChannelLayout GetAudioInputHardwareChannelLayout(
const std::string& device_id);
+// Computes a buffer size based on the given |sample_rate|. Must be used in
+// conjunction with AUDIO_PCM_LINEAR.
+MEDIA_EXPORT size_t GetHighLatencyOutputBufferSize(int sample_rate);
+
// Functions that handle data buffer passed between processes in the shared
// memory. Called on both IPC sides.
diff --git a/media/audio/null_audio_sink.cc b/media/audio/null_audio_sink.cc
new file mode 100644
index 0000000..e8cd4eb
--- /dev/null
+++ b/media/audio/null_audio_sink.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/null_audio_sink.h"
+
+#include "base/bind.h"
+#include "base/threading/platform_thread.h"
+
+namespace media {
+
+NullAudioSink::NullAudioSink()
+ : initialized_(false),
+ playback_rate_(0.0),
+ playing_(false),
+ callback_(NULL),
+ thread_("NullAudioThread") {
+}
+
+NullAudioSink::~NullAudioSink() {
+ DCHECK(!thread_.IsRunning());
+ for (size_t i = 0; i < audio_data_.size(); ++i)
+ delete [] audio_data_[i];
+}
+
+
+void NullAudioSink::Start() {
+ if (!thread_.Start())
+ return;
+
+ thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
+ &NullAudioSink::FillBufferTask, this));
+}
+
+void NullAudioSink::Stop() {
+ SetPlaying(false);
+ thread_.Stop();
+}
+
+void NullAudioSink::Play() {
+ SetPlaying(true);
+}
+
+void NullAudioSink::Pause(bool /* flush */) {
+ SetPlaying(false);
+}
+
+void NullAudioSink::SetPlaybackRate(float rate) {
+ base::AutoLock auto_lock(lock_);
+ playback_rate_ = rate;
+}
+
+bool NullAudioSink::SetVolume(double volume) {
+ // Audio is always muted.
+ return volume == 0.0;
+}
+
+void NullAudioSink::GetVolume(double* volume) {
+ // Audio is always muted.
+ *volume = 0.0;
+}
+
+void NullAudioSink::SetPlaying(bool is_playing) {
+ base::AutoLock auto_lock(lock_);
+ playing_ = is_playing;
+}
+
+void NullAudioSink::Initialize(const AudioParameters& params,
+ RenderCallback* callback) {
+ DCHECK(!initialized_);
+ params_ = params;
+
+ audio_data_.reserve(params.channels());
+ for (int i = 0; i < params.channels(); ++i) {
+ float* channel_data = new float[params.frames_per_buffer()];
+ audio_data_.push_back(channel_data);
+ }
+
+ callback_ = callback;
+ initialized_ = true;
+}
+
+void NullAudioSink::FillBufferTask() {
+ base::AutoLock auto_lock(lock_);
+
+ base::TimeDelta delay;
+ // Only consume buffers when actually playing.
+ if (playing_) {
+ DCHECK_GT(playback_rate_, 0.0f);
+ int requested_frames = params_.frames_per_buffer();
+ int frames_received = callback_->Render(audio_data_, requested_frames, 0);
+ int frames_per_millisecond =
+ params_.sample_rate() / base::Time::kMillisecondsPerSecond;
+
+ // Calculate our sleep duration, taking playback rate into consideration.
+ delay = base::TimeDelta::FromMilliseconds(
+ frames_received / (frames_per_millisecond * playback_rate_));
+ } else {
+ // If paused, sleep for 10 milliseconds before polling again.
+ delay = base::TimeDelta::FromMilliseconds(10);
+ }
+
+ // Sleep for at least one millisecond so we don't spin the CPU.
+ MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&NullAudioSink::FillBufferTask, this),
+ std::max(delay, base::TimeDelta::FromMilliseconds(1)));
+}
+
+} // namespace media
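A worked example of the sleep computation in FillBufferTask() above (editorial; the 2048-frame buffer is illustrative, the real value comes from AudioParameters::frames_per_buffer()): at 44100 Hz, frames_per_millisecond is 44100 / 1000 = 44, so with frames_received = 2048 and playback_rate_ = 1.0 the task sleeps about 2048 / 44 ~= 46 ms, roughly the duration of the audio it just discarded, which is how the null sink approximates real-time consumption. At playback_rate_ = 2.0 the sleep halves to ~= 23 ms and data is consumed twice as fast.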
diff --git a/media/audio/null_audio_sink.h b/media/audio/null_audio_sink.h
new file mode 100644
index 0000000..32245eb
--- /dev/null
+++ b/media/audio/null_audio_sink.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
+#define MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
+
+// NullAudioSink effectively uses an extra thread to "throw away" the
+// audio data at a rate resembling normal playback speed. It's just like
+// decoding to /dev/null!
+//
+// NullAudioSink can also be used in situations where the client has no
+// audio device or we haven't written an audio implementation for a particular
+// platform yet.
+
+#include <vector>
+
+#include "base/threading/thread.h"
+#include "media/base/audio_renderer_sink.h"
+
+namespace media {
+
+class MEDIA_EXPORT NullAudioSink
+ : NON_EXPORTED_BASE(public AudioRendererSink) {
+ public:
+ NullAudioSink();
+ virtual ~NullAudioSink();
+
+ // AudioRendererSink implementation.
+ virtual void Initialize(const AudioParameters& params,
+ RenderCallback* callback) OVERRIDE;
+ virtual void Start() OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Pause(bool flush) OVERRIDE;
+ virtual void Play() OVERRIDE;
+ virtual void SetPlaybackRate(float rate) OVERRIDE;
+ virtual bool SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ private:
+ // Audio thread task that periodically calls FillBuffer() to consume
+ // audio data.
+ void FillBufferTask();
+
+ void SetPlaying(bool is_playing);
+
+ // A buffer passed to FillBuffer to advance playback.
+ std::vector<float*> audio_data_;
+
+ AudioParameters params_;
+ bool initialized_;
+ float playback_rate_;
+ bool playing_;
+ RenderCallback* callback_;
+
+ // Separate thread used to throw away data.
+ base::Thread thread_;
+ base::Lock lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullAudioSink);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
diff --git a/media/base/audio_renderer_sink.h b/media/base/audio_renderer_sink.h
index 01eb185..49ddbf5 100644
--- a/media/base/audio_renderer_sink.h
+++ b/media/base/audio_renderer_sink.h
@@ -28,9 +28,9 @@ class AudioRendererSink
// continuous stream). That actual number of frames is passed to host
// together with PCM audio data and host is free to use or ignore it.
// TODO(crogers): use base:Callback instead.
- virtual size_t Render(const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds) = 0;
+ virtual int Render(const std::vector<float*>& audio_data,
+ int number_of_frames,
+ int audio_delay_milliseconds) = 0;
// Signals an error has occurred.
virtual void OnRenderError() = 0;
@@ -58,6 +58,10 @@ class AudioRendererSink
// Resumes playback after calling Pause().
virtual void Play() = 0;
+ // Called to inform the sink of a change in playback rate. Override if
+ // the subclass needs the playback rate.
+ virtual void SetPlaybackRate(float rate) {};
+
// Sets the playback volume, with range [0.0, 1.0] inclusive.
// Returns |true| on success.
virtual bool SetVolume(double volume) = 0;
diff --git a/media/base/pipeline.cc b/media/base/pipeline.cc
index dd0822e..699aba7 100644
--- a/media/base/pipeline.cc
+++ b/media/base/pipeline.cc
@@ -436,7 +436,7 @@ base::TimeDelta Pipeline::GetDuration() const {
}
void Pipeline::OnAudioTimeUpdate(base::TimeDelta time,
- base::TimeDelta max_time) {
+ base::TimeDelta max_time) {
DCHECK(time <= max_time);
DCHECK(IsRunning());
base::AutoLock auto_lock(lock_);
diff --git a/media/filters/audio_renderer_base.cc b/media/filters/audio_renderer_base.cc
index eda4d9e..21d9bc5 100644
--- a/media/filters/audio_renderer_base.cc
+++ b/media/filters/audio_renderer_base.cc
@@ -4,23 +4,27 @@
#include "media/filters/audio_renderer_base.h"
-#include <algorithm>
-#include <string>
+#include <math.h>
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "media/base/filter_host.h"
+#include "media/audio/audio_util.h"
namespace media {
-AudioRendererBase::AudioRendererBase()
+AudioRendererBase::AudioRendererBase(media::AudioRendererSink* sink)
: state_(kUninitialized),
pending_read_(false),
received_end_of_stream_(false),
rendered_end_of_stream_(false),
bytes_per_frame_(0),
+ bytes_per_second_(0),
+ stopped_(false),
+ sink_(sink),
+ is_initialized_(false),
read_cb_(base::Bind(&AudioRendererBase::DecodedAudioReady,
base::Unretained(this))) {
}
@@ -32,25 +36,53 @@ AudioRendererBase::~AudioRendererBase() {
}
void AudioRendererBase::Play(const base::Closure& callback) {
- base::AutoLock auto_lock(lock_);
- DCHECK_EQ(kPaused, state_);
- state_ = kPlaying;
- callback.Run();
-}
+ {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_EQ(kPaused, state_);
+ state_ = kPlaying;
+ callback.Run();
+ }
-void AudioRendererBase::Pause(const base::Closure& callback) {
- base::AutoLock auto_lock(lock_);
- DCHECK(state_ == kPlaying || state_ == kUnderflow || state_ == kRebuffering);
- pause_cb_ = callback;
- state_ = kPaused;
+ if (stopped_)
+ return;
- // Pause only when we've completed our pending read.
- if (!pending_read_) {
- pause_cb_.Run();
- pause_cb_.Reset();
+ if (GetPlaybackRate() != 0.0f) {
+ DoPlay();
} else {
+ DoPause();
+ }
+}
+
+void AudioRendererBase::DoPlay() {
+ earliest_end_time_ = base::Time::Now();
+ DCHECK(sink_.get());
+ sink_->Play();
+}
+
+void AudioRendererBase::Pause(const base::Closure& callback) {
+ {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(state_ == kPlaying || state_ == kUnderflow ||
+ state_ == kRebuffering);
+ pause_cb_ = callback;
state_ = kPaused;
+
+ // Pause only when we've completed our pending read.
+ if (!pending_read_) {
+ pause_cb_.Run();
+ pause_cb_.Reset();
+ }
}
+
+ if (stopped_)
+ return;
+
+ DoPause();
+}
+
+void AudioRendererBase::DoPause() {
+ DCHECK(sink_.get());
+ sink_->Pause(false);
}
void AudioRendererBase::Flush(const base::Closure& callback) {
@@ -58,7 +90,12 @@ void AudioRendererBase::Flush(const base::Closure& callback) {
}
void AudioRendererBase::Stop(const base::Closure& callback) {
- OnStop();
+ if (!stopped_) {
+ DCHECK(sink_.get());
+ sink_->Stop();
+
+ stopped_ = true;
+ }
{
base::AutoLock auto_lock(lock_);
state_ = kStopped;
@@ -82,12 +119,24 @@ void AudioRendererBase::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
seek_timestamp_ = time;
// Throw away everything and schedule our reads.
- last_fill_buffer_time_ = base::TimeDelta();
+ audio_time_buffered_ = base::TimeDelta();
received_end_of_stream_ = false;
rendered_end_of_stream_ = false;
// |algorithm_| will request more reads.
algorithm_->FlushBuffers();
+
+ if (stopped_)
+ return;
+
+ DoSeek();
+}
+
+void AudioRendererBase::DoSeek() {
+ earliest_end_time_ = base::Time::Now();
+
+ // Pause and flush the stream when we seek to a new location.
+ sink_->Pause(true);
}
void AudioRendererBase::Initialize(const scoped_refptr<AudioDecoder>& decoder,
@@ -120,16 +169,32 @@ void AudioRendererBase::Initialize(const scoped_refptr<AudioDecoder>& decoder,
bool config_ok = algorithm_->ValidateConfig(channels, sample_rate,
bits_per_channel);
- if (config_ok)
- algorithm_->Initialize(channels, sample_rate, bits_per_channel, 0.0f, cb);
-
- // Give the subclass an opportunity to initialize itself.
- if (!config_ok || !OnInitialize(bits_per_channel, channel_layout,
- sample_rate)) {
+ if (!config_ok || is_initialized_) {
init_cb.Run(PIPELINE_ERROR_INITIALIZATION_FAILED);
return;
}
+ if (config_ok)
+ algorithm_->Initialize(channels, sample_rate, bits_per_channel, 0.0f, cb);
+
+ // We use the AUDIO_PCM_LINEAR flag because AUDIO_PCM_LOW_LATENCY
+ // does not currently support all the sample-rates that we require.
+ // Please see: http://code.google.com/p/chromium/issues/detail?id=103627
+ // for more details.
+ audio_parameters_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, channel_layout, sample_rate,
+ bits_per_channel, GetHighLatencyOutputBufferSize(sample_rate));
+
+ bytes_per_second_ = audio_parameters_.GetBytesPerSecond();
+
+ DCHECK(sink_.get());
+ DCHECK(!is_initialized_);
+
+ sink_->Initialize(audio_parameters_, this);
+
+ sink_->Start();
+ is_initialized_ = true;
+
// Finally, execute the start callback.
state_ = kPaused;
init_cb.Run(PIPELINE_OK);
@@ -152,6 +217,12 @@ void AudioRendererBase::ResumeAfterUnderflow(bool buffer_more_audio) {
}
}
+void AudioRendererBase::SetVolume(float volume) {
+ if (stopped_)
+ return;
+ sink_->SetVolume(volume);
+}
+
void AudioRendererBase::DecodedAudioReady(scoped_refptr<Buffer> buffer) {
base::AutoLock auto_lock(lock_);
DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying ||
@@ -203,12 +274,115 @@ void AudioRendererBase::DecodedAudioReady(scoped_refptr<Buffer> buffer) {
}
}
+void AudioRendererBase::SignalEndOfStream() {
+ DCHECK(received_end_of_stream_);
+ if (!rendered_end_of_stream_) {
+ rendered_end_of_stream_ = true;
+ host()->NotifyEnded();
+ }
+}
+
+void AudioRendererBase::ScheduleRead_Locked() {
+ lock_.AssertAcquired();
+ if (pending_read_ || state_ == kPaused)
+ return;
+ pending_read_ = true;
+ decoder_->Read(read_cb_);
+}
+
+void AudioRendererBase::SetPlaybackRate(float playback_rate) {
+ DCHECK_LE(0.0f, playback_rate);
+
+ if (!stopped_) {
+ // Notify sink of new playback rate.
+ sink_->SetPlaybackRate(playback_rate);
+
+ // We have two cases here:
+ // Play: GetPlaybackRate() == 0.0 && playback_rate != 0.0
+ // Pause: GetPlaybackRate() != 0.0 && playback_rate == 0.0
+ if (GetPlaybackRate() == 0.0f && playback_rate != 0.0f) {
+ DoPlay();
+ } else if (GetPlaybackRate() != 0.0f && playback_rate == 0.0f) {
+ // Pause is easy, we can always pause.
+ DoPause();
+ }
+ }
+
+ base::AutoLock auto_lock(lock_);
+ algorithm_->SetPlaybackRate(playback_rate);
+}
+
+float AudioRendererBase::GetPlaybackRate() {
+ base::AutoLock auto_lock(lock_);
+ return algorithm_->playback_rate();
+}
+
+bool AudioRendererBase::IsBeforeSeekTime(const scoped_refptr<Buffer>& buffer) {
+ return (state_ == kSeeking) && buffer && !buffer->IsEndOfStream() &&
+ (buffer->GetTimestamp() + buffer->GetDuration()) < seek_timestamp_;
+}
+
+int AudioRendererBase::Render(const std::vector<float*>& audio_data,
+ int number_of_frames,
+ int audio_delay_milliseconds) {
+ if (stopped_ || GetPlaybackRate() == 0.0f) {
+ // Output silence if stopped.
+ for (size_t i = 0; i < audio_data.size(); ++i)
+ memset(audio_data[i], 0, sizeof(float) * number_of_frames);
+ return 0;
+ }
+
+ // Adjust the playback delay.
+ base::TimeDelta request_delay =
+ base::TimeDelta::FromMilliseconds(audio_delay_milliseconds);
+
+ // Finally we need to adjust the delay according to playback rate.
+ if (GetPlaybackRate() != 1.0f) {
+ request_delay = base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(ceil(request_delay.InMicroseconds() *
+ GetPlaybackRate())));
+ }
+
+ int bytes_per_frame = audio_parameters_.GetBytesPerFrame();
+
+ const int buf_size = number_of_frames * bytes_per_frame;
+ scoped_array<uint8> buf(new uint8[buf_size]);
+
+ int frames_filled = FillBuffer(buf.get(), number_of_frames, request_delay);
+ int bytes_filled = frames_filled * bytes_per_frame;
+ DCHECK_LE(bytes_filled, buf_size);
+ UpdateEarliestEndTime(bytes_filled, request_delay, base::Time::Now());
+
+ // Deinterleave each audio channel.
+ int channels = audio_data.size();
+ for (int channel_index = 0; channel_index < channels; ++channel_index) {
+ media::DeinterleaveAudioChannel(buf.get(),
+ audio_data[channel_index],
+ channels,
+ channel_index,
+ bytes_per_frame / channels,
+ frames_filled);
+
+ // If FillBuffer() didn't give us enough data then zero out the remainder.
+ if (frames_filled < number_of_frames) {
+ int frames_to_zero = number_of_frames - frames_filled;
+ memset(audio_data[channel_index] + frames_filled,
+ 0,
+ sizeof(float) * frames_to_zero);
+ }
+ }
+ return frames_filled;
+}
+
uint32 AudioRendererBase::FillBuffer(uint8* dest,
uint32 requested_frames,
const base::TimeDelta& playback_delay) {
- // The timestamp of the last buffer written during the last call to
- // FillBuffer().
- base::TimeDelta last_fill_buffer_time;
+ // The |audio_time_buffered_| is the ending timestamp of the last frame
+ // buffered at the audio device. |playback_delay| is the amount of time
+ // buffered at the audio device. The current time can be computed by their
+ // difference.
+ base::TimeDelta current_time = audio_time_buffered_ - playback_delay;
+
size_t frames_written = 0;
base::Closure underflow_cb;
{
@@ -232,10 +406,6 @@ uint32 AudioRendererBase::FillBuffer(uint8* dest,
return zeros_to_write / bytes_per_frame_;
}
- // Save a local copy of last fill buffer time and reset the member.
- last_fill_buffer_time = last_fill_buffer_time_;
- last_fill_buffer_time_ = base::TimeDelta();
-
// Use three conditions to determine the end of playback:
// 1. Algorithm needs more audio data.
// 2. We've received an end of stream buffer.
@@ -251,7 +421,9 @@ uint32 AudioRendererBase::FillBuffer(uint8* dest,
// 3. Have not received an end of stream buffer.
if (algorithm_->NeedsMoreData()) {
if (received_end_of_stream_) {
- OnRenderEndOfStream();
+ // TODO(enal): schedule callback instead of polling.
+ if (base::Time::Now() >= earliest_end_time_)
+ SignalEndOfStream();
} else if (state_ == kPlaying) {
state_ = kUnderflow;
underflow_cb = underflow_cb_;
@@ -260,17 +432,17 @@ uint32 AudioRendererBase::FillBuffer(uint8* dest,
// Otherwise fill the buffer.
frames_written = algorithm_->FillBuffer(dest, requested_frames);
}
-
- // Get the current time.
- last_fill_buffer_time_ = algorithm_->GetTime();
}
- // Update the pipeline's time if it was set last time.
- base::TimeDelta new_current_time = last_fill_buffer_time - playback_delay;
- if (last_fill_buffer_time.InMicroseconds() > 0 &&
- (last_fill_buffer_time != last_fill_buffer_time_ ||
- new_current_time > host()->GetTime())) {
- time_cb_.Run(new_current_time, last_fill_buffer_time);
+ base::TimeDelta previous_time_buffered = audio_time_buffered_;
+ // The call to FillBuffer() on |algorithm_| has increased the amount of
+ // buffered audio data. Update the new amount of time buffered.
+ audio_time_buffered_ = algorithm_->GetTime();
+
+ if (previous_time_buffered.InMicroseconds() > 0 &&
+ (previous_time_buffered != audio_time_buffered_ ||
+ current_time > host()->GetTime())) {
+ time_cb_.Run(current_time, audio_time_buffered_);
}
if (!underflow_cb.is_null())
@@ -279,35 +451,33 @@ uint32 AudioRendererBase::FillBuffer(uint8* dest,
return frames_written;
}
-void AudioRendererBase::SignalEndOfStream() {
- DCHECK(received_end_of_stream_);
- if (!rendered_end_of_stream_) {
- rendered_end_of_stream_ = true;
- host()->NotifyEnded();
+void AudioRendererBase::UpdateEarliestEndTime(int bytes_filled,
+ base::TimeDelta request_delay,
+ base::Time time_now) {
+ if (bytes_filled != 0) {
+ base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled);
+ float playback_rate = GetPlaybackRate();
+ if (playback_rate != 1.0f) {
+ predicted_play_time = base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(ceil(predicted_play_time.InMicroseconds() *
+ playback_rate)));
+ }
+ earliest_end_time_ =
+ std::max(earliest_end_time_,
+ time_now + request_delay + predicted_play_time);
}
}
-void AudioRendererBase::ScheduleRead_Locked() {
- lock_.AssertAcquired();
- if (pending_read_ || state_ == kPaused)
- return;
- pending_read_ = true;
- decoder_->Read(read_cb_);
-}
-
-void AudioRendererBase::SetPlaybackRate(float playback_rate) {
- base::AutoLock auto_lock(lock_);
- algorithm_->SetPlaybackRate(playback_rate);
-}
-
-float AudioRendererBase::GetPlaybackRate() {
- base::AutoLock auto_lock(lock_);
- return algorithm_->playback_rate();
+base::TimeDelta AudioRendererBase::ConvertToDuration(int bytes) {
+ if (bytes_per_second_) {
+ return base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond * bytes / bytes_per_second_);
+ }
+ return base::TimeDelta();
}
-bool AudioRendererBase::IsBeforeSeekTime(const scoped_refptr<Buffer>& buffer) {
- return (state_ == kSeeking) && buffer && !buffer->IsEndOfStream() &&
- (buffer->GetTimestamp() + buffer->GetDuration()) < seek_timestamp_;
+void AudioRendererBase::OnRenderError() {
+ host()->DisableAudioRenderer();
}
} // namespace media
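
For illustration only (not part of this patch): a minimal standalone sketch of the clock math FillBuffer() now performs, where the current media time is the buffered end timestamp minus the sink-reported playback delay. The concrete millisecond values below are arbitrary assumptions.

#include <cstdint>
#include <iostream>

int main() {
  // Ending media timestamp of the audio handed to the device so far.
  int64_t audio_time_buffered_us = 500000;  // 500 ms
  // Audio written to the device but not yet played, as reported by the sink.
  int64_t playback_delay_us = 120000;       // 120 ms

  // FillBuffer() reports the current time as the difference of the two.
  int64_t current_time_us = audio_time_buffered_us - playback_delay_us;
  std::cout << "current time: " << current_time_us / 1000 << " ms\n";  // 380 ms
  return 0;
}
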
diff --git a/media/filters/audio_renderer_base.h b/media/filters/audio_renderer_base.h
index 4059622..776bee0 100644
--- a/media/filters/audio_renderer_base.h
+++ b/media/filters/audio_renderer_base.h
@@ -2,19 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// AudioRendererBase takes care of the tricky queuing work and provides simple
-// methods for subclasses to peek and poke at audio data. In addition to
-// AudioRenderer interface methods this classes doesn't implement, subclasses
-// must also implement the following methods:
-// OnInitialized
-// OnStop
-// OnRenderEndOfStream
+// Audio rendering unit utilizing an AudioRendererSink to output data.
//
-// The general assumption is that subclasses start a callback-based audio thread
-// which needs to be filled with decoded audio data. AudioDecoderBase provides
-// FillBuffer which handles filling the provided buffer, dequeuing items,
-// scheduling additional reads and updating the clock. In a sense,
-// AudioRendererBase is the producer and the subclass is the consumer.
+// This class lives inside three threads during its lifetime, namely:
+// 1. Render thread.
+// This object is created on the render thread.
+// 2. Pipeline thread.
+// Initialize() is called here with the audio format.
+// Play/Pause/Seek also happen here.
+// 3. Audio thread created by the AudioRendererSink.
+// Render() is called here where audio data is decoded into raw PCM data.
+//
+// AudioRendererBase talks to an AudioRendererAlgorithmBase that takes care of
+// queueing audio data and stretching/shrinking it when the playback rate is
+// not 1.0 or 0.0.
#ifndef MEDIA_FILTERS_AUDIO_RENDERER_BASE_H_
#define MEDIA_FILTERS_AUDIO_RENDERER_BASE_H_
@@ -23,22 +24,29 @@
#include "base/synchronization/lock.h"
#include "media/base/audio_decoder.h"
+#include "media/base/audio_renderer_sink.h"
#include "media/base/buffers.h"
#include "media/base/filters.h"
#include "media/filters/audio_renderer_algorithm_base.h"
namespace media {
-class MEDIA_EXPORT AudioRendererBase : public AudioRenderer {
+class MEDIA_EXPORT AudioRendererBase
+ : public AudioRenderer,
+ NON_EXPORTED_BASE(public media::AudioRendererSink::RenderCallback) {
public:
- AudioRendererBase();
+ // Methods called on Render thread ------------------------------------------
+ // An AudioRendererSink is used as the destination for the rendered audio.
+ explicit AudioRendererBase(media::AudioRendererSink* sink);
virtual ~AudioRendererBase();
+ // Methods called on pipeline thread ----------------------------------------
// Filter implementation.
virtual void Play(const base::Closure& callback) OVERRIDE;
virtual void Pause(const base::Closure& callback) OVERRIDE;
virtual void Flush(const base::Closure& callback) OVERRIDE;
virtual void Stop(const base::Closure& callback) OVERRIDE;
+ virtual void SetPlaybackRate(float rate) OVERRIDE;
virtual void Seek(base::TimeDelta time, const PipelineStatusCB& cb) OVERRIDE;
// AudioRenderer implementation.
@@ -48,26 +56,13 @@ class MEDIA_EXPORT AudioRendererBase : public AudioRenderer {
const TimeCB& time_cb) OVERRIDE;
virtual bool HasEnded() OVERRIDE;
virtual void ResumeAfterUnderflow(bool buffer_more_audio) OVERRIDE;
+ virtual void SetVolume(float volume) OVERRIDE;
- protected:
+ private:
+ friend class AudioRendererBaseTest;
FRIEND_TEST_ALL_PREFIXES(AudioRendererBaseTest, EndOfStream);
FRIEND_TEST_ALL_PREFIXES(AudioRendererBaseTest, Underflow_EndOfStream);
- // Subclasses should return true if they were able to initialize, false
- // otherwise.
- virtual bool OnInitialize(int bits_per_channel,
- ChannelLayout channel_layout,
- int sample_rate) = 0;
-
- // Called by Stop(). Subclasses should perform any necessary cleanup during
- // this time, such as stopping any running threads.
- virtual void OnStop() = 0;
-
- // Method called by FillBuffer() when it finds that it reached end of stream.
- // FillBuffer() cannot immediately signal end of stream event because browser
- // may have buffered data.
- virtual void OnRenderEndOfStream() = 0;
-
// Callback from the audio decoder delivering decoded audio samples.
void DecodedAudioReady(scoped_refptr<Buffer> buffer);
@@ -88,27 +83,39 @@ class MEDIA_EXPORT AudioRendererBase : public AudioRenderer {
// should the filled buffer be played. If FillBuffer() is called as the audio
// hardware plays the buffer, then |playback_delay| should be zero.
//
- // FillBuffer() calls OnRenderEndOfStream() when it reaches end of stream.
- // It is responsibility of derived class to provide implementation of
- // OnRenderEndOfStream() that calls SignalEndOfStream() when all the hardware
- // buffers become empty (i.e. when all the data written to the device has
- // been played).
+ // FillBuffer() calls SignalEndOfStream() when it reaches end of stream.
//
// Safe to call on any thread.
uint32 FillBuffer(uint8* dest,
uint32 requested_frames,
const base::TimeDelta& playback_delay);
- // Called by OnRenderEndOfStream() or some callback scheduled by derived class
- // to signal end of stream.
+ // Called at the end of stream when all the hardware buffers become empty
+ // (i.e. when all the data written to the device has been played).
void SignalEndOfStream();
- // Get/Set the playback rate of |algorithm_|.
- virtual void SetPlaybackRate(float playback_rate) OVERRIDE;
- virtual float GetPlaybackRate();
+ // Get the playback rate of |algorithm_|.
+ float GetPlaybackRate();
- private:
- friend class AudioRendererBaseTest;
+ // Convert a number of bytes to a duration of time using the number of
+ // channels, sample rate and bits per sample.
+ base::TimeDelta ConvertToDuration(int bytes);
+
+ // Estimate the earliest time at which the current buffer can finish playing.
+ void UpdateEarliestEndTime(int bytes_filled,
+ base::TimeDelta request_delay,
+ base::Time time_now);
+
+ // Methods called on pipeline thread ----------------------------------------
+ void DoPlay();
+ void DoPause();
+ void DoSeek();
+
+ // media::AudioRendererSink::RenderCallback implementation.
+ virtual int Render(const std::vector<float*>& audio_data,
+ int number_of_frames,
+ int audio_delay_milliseconds) OVERRIDE;
+ virtual void OnRenderError() OVERRIDE;
// Helper method that schedules an asynchronous read from the decoder and
// increments |pending_reads_|.
@@ -148,9 +155,9 @@ class MEDIA_EXPORT AudioRendererBase : public AudioRenderer {
bool received_end_of_stream_;
bool rendered_end_of_stream_;
- // Audio time at end of last call to FillBuffer().
+ // The timestamp of the last frame (i.e. furthest in the future) buffered.
// TODO(ralphl): Update this value after seeking.
- base::TimeDelta last_fill_buffer_time_;
+ base::TimeDelta audio_time_buffered_;
// Filter callbacks.
base::Closure pause_cb_;
@@ -164,6 +171,36 @@ class MEDIA_EXPORT AudioRendererBase : public AudioRenderer {
uint32 bytes_per_frame_;
+ // Used to convert a number of bytes into an audio delay.
+ uint32 bytes_per_second_;
+
+ // A flag indicating that this filter has been asked to stop.
+ bool stopped_;
+
+ // The sink (destination) for rendered audio.
+ scoped_refptr<media::AudioRendererSink> sink_;
+
+ // Set to true when OnInitialize() is called.
+ bool is_initialized_;
+
+ // We are supposed to know how much audio data the OS or hardware has
+ // buffered, but that is not always the case -- on some Linux machines
+ // AudioBuffersState::hardware_delay_bytes never reaches 0.
+ //
+ // As a result we cannot rely on it to detect when the stream ends. If we
+ // simply ignore buffered data we will notify the host that the stream ended
+ // before it actually did -- observed roughly 140ms too early when playing a
+ // ~150ms file.
+ //
+ // Instead of inventing an OS-specific solution for every platform we
+ // support, use a simple workaround: every time we fill the buffer, remember
+ // when it should stop playing and do not assume the buffer is empty until
+ // that time. The workaround is not bulletproof, as we don't know exactly
+ // when that data starts playing, but it is much better than nothing.
+ base::Time earliest_end_time_;
+
+ AudioParameters audio_parameters_;
+
AudioDecoder::ReadCB read_cb_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererBase);
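
For illustration only (not part of this patch): a minimal standalone sketch of the earliest-end-time estimate described above, mirroring ConvertToDuration() and UpdateEarliestEndTime() from the .cc hunk. The audio format, byte counts and delays below are arbitrary assumptions.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  const int64_t kMicrosecondsPerSecond = 1000000;
  // Assumed format: 44.1 kHz, stereo, 16-bit samples.
  const int bytes_per_second = 44100 * 2 * 2;

  const int bytes_filled = 17640;          // roughly 100 ms of audio
  const int64_t request_delay_us = 20000;  // delay reported by the sink
  const double playback_rate = 1.5;
  const int64_t now_us = 0;                // pretend "now" is t = 0
  int64_t earliest_end_time_us = 0;

  // ConvertToDuration(): bytes -> microseconds of audio.
  int64_t predicted_play_time_us =
      kMicrosecondsPerSecond * bytes_filled / bytes_per_second;

  // UpdateEarliestEndTime() scales the estimate by the playback rate when it
  // is not 1.0, as the code in the patch above does.
  if (playback_rate != 1.0) {
    predicted_play_time_us = static_cast<int64_t>(
        std::ceil(predicted_play_time_us * playback_rate));
  }

  // Never move the estimate backwards.
  earliest_end_time_us =
      std::max(earliest_end_time_us,
               now_us + request_delay_us + predicted_play_time_us);

  std::cout << "earliest end time: " << earliest_end_time_us / 1000 << " ms\n";
  return 0;
}
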
diff --git a/media/filters/audio_renderer_base_unittest.cc b/media/filters/audio_renderer_base_unittest.cc
index 9880e5c..ee756f1 100644
--- a/media/filters/audio_renderer_base_unittest.cc
+++ b/media/filters/audio_renderer_base_unittest.cc
@@ -16,10 +16,26 @@ using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Invoke;
using ::testing::Return;
-using ::testing::ReturnPointee;
-using ::testing::SaveArg;
+using ::testing::NiceMock;
using ::testing::StrictMock;
+namespace {
+
+class MockAudioSink : public media::AudioRendererSink {
+ public:
+ MOCK_METHOD2(Initialize, void(const media::AudioParameters& params,
+ RenderCallback* callback));
+ MOCK_METHOD0(Start, void());
+ MOCK_METHOD0(Stop, void());
+ MOCK_METHOD1(Pause, void(bool flush));
+ MOCK_METHOD0(Play, void());
+ MOCK_METHOD1(SetPlaybackRate, void(float rate));
+ MOCK_METHOD1(SetVolume, bool(double volume));
+ MOCK_METHOD1(GetVolume, void(double* volume));
+};
+
+} // namespace
+
namespace media {
// Constants for distinguishing between muted audio and playing audio when using
@@ -27,30 +43,11 @@ namespace media {
static uint8 kMutedAudio = 0x00;
static uint8 kPlayingAudio = 0x99;
-// Mocked subclass of AudioRendererBase for testing purposes.
-class MockAudioRendererBase : public AudioRendererBase {
- public:
- MockAudioRendererBase()
- : AudioRendererBase() {}
- virtual ~MockAudioRendererBase() {}
-
- // AudioRenderer implementation.
- MOCK_METHOD1(SetVolume, void(float volume));
-
- // AudioRendererBase implementation.
- MOCK_METHOD3(OnInitialize, bool(int, ChannelLayout, int));
- MOCK_METHOD0(OnStop, void());
- MOCK_METHOD0(OnRenderEndOfStream, void());
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockAudioRendererBase);
-};
-
class AudioRendererBaseTest : public ::testing::Test {
public:
// Give the decoder some non-garbage media properties.
AudioRendererBaseTest()
- : renderer_(new MockAudioRendererBase()),
+ : renderer_(new AudioRendererBase(new NiceMock<MockAudioSink>())),
decoder_(new MockAudioDecoder()) {
renderer_->set_host(&host_);
@@ -59,13 +56,7 @@ class AudioRendererBaseTest : public ::testing::Test {
.WillByDefault(Invoke(this, &AudioRendererBaseTest::SaveReadCallback));
// Set up audio properties.
- ON_CALL(*decoder_, bits_per_channel())
- .WillByDefault(Return(16));
- ON_CALL(*decoder_, channel_layout())
- .WillByDefault(Return(CHANNEL_LAYOUT_MONO));
- ON_CALL(*decoder_, samples_per_second())
- .WillByDefault(Return(44100));
-
+ SetSupportedAudioDecoderProperties();
EXPECT_CALL(*decoder_, bits_per_channel())
.Times(AnyNumber());
EXPECT_CALL(*decoder_, channel_layout())
@@ -75,7 +66,6 @@ class AudioRendererBaseTest : public ::testing::Test {
}
virtual ~AudioRendererBaseTest() {
- EXPECT_CALL(*renderer_, OnStop());
renderer_->Stop(NewExpectedClosure());
}
@@ -91,6 +81,24 @@ class AudioRendererBaseTest : public ::testing::Test {
base::Unretained(this));
}
+ void SetSupportedAudioDecoderProperties() {
+ ON_CALL(*decoder_, bits_per_channel())
+ .WillByDefault(Return(16));
+ ON_CALL(*decoder_, channel_layout())
+ .WillByDefault(Return(CHANNEL_LAYOUT_MONO));
+ ON_CALL(*decoder_, samples_per_second())
+ .WillByDefault(Return(44100));
+ }
+
+ void SetUnsupportedAudioDecoderProperties() {
+ ON_CALL(*decoder_, bits_per_channel())
+ .WillByDefault(Return(3));
+ ON_CALL(*decoder_, channel_layout())
+ .WillByDefault(Return(CHANNEL_LAYOUT_UNSUPPORTED));
+ ON_CALL(*decoder_, samples_per_second())
+ .WillByDefault(Return(0));
+ }
+
void OnAudioTimeCallback(
base::TimeDelta current_time, base::TimeDelta max_time) {
CHECK(current_time <= max_time);
@@ -102,8 +110,6 @@ class AudioRendererBaseTest : public ::testing::Test {
}
void Initialize() {
- EXPECT_CALL(*renderer_, OnInitialize(_, _, _))
- .WillOnce(Return(true));
renderer_->Initialize(
decoder_, NewExpectedStatusCB(PIPELINE_OK), NewUnderflowClosure(),
NewAudioTimeClosure());
@@ -218,7 +224,7 @@ class AudioRendererBaseTest : public ::testing::Test {
}
// Fixture members.
- scoped_refptr<MockAudioRendererBase> renderer_;
+ scoped_refptr<AudioRendererBase> renderer_;
scoped_refptr<MockAudioDecoder> decoder_;
StrictMock<MockFilterHost> host_;
AudioDecoder::ReadCB read_cb_;
@@ -234,8 +240,7 @@ class AudioRendererBaseTest : public ::testing::Test {
};
TEST_F(AudioRendererBaseTest, Initialize_Failed) {
- EXPECT_CALL(*renderer_, OnInitialize(_, _, _))
- .WillOnce(Return(false));
+ SetUnsupportedAudioDecoderProperties();
renderer_->Initialize(
decoder_,
NewExpectedStatusCB(PIPELINE_ERROR_INITIALIZATION_FAILED),
@@ -246,8 +251,6 @@ TEST_F(AudioRendererBaseTest, Initialize_Failed) {
}
TEST_F(AudioRendererBaseTest, Initialize_Successful) {
- EXPECT_CALL(*renderer_, OnInitialize(_, _, _))
- .WillOnce(Return(true));
renderer_->Initialize(decoder_, NewExpectedStatusCB(PIPELINE_OK),
NewUnderflowClosure(), NewAudioTimeClosure());
@@ -285,8 +288,6 @@ TEST_F(AudioRendererBaseTest, EndOfStream) {
EXPECT_FALSE(renderer_->HasEnded());
// Drain internal buffer, now we should report ended.
- EXPECT_CALL(*renderer_, OnRenderEndOfStream())
- .WillOnce(Invoke(renderer_.get(), &AudioRendererBase::SignalEndOfStream));
EXPECT_CALL(host_, NotifyEnded());
EXPECT_TRUE(ConsumeBufferedData(bytes_buffered(), NULL));
EXPECT_TRUE(renderer_->HasEnded());
@@ -367,8 +368,6 @@ TEST_F(AudioRendererBaseTest, Underflow_EndOfStream) {
// stop reading after receiving an end of stream buffer. It should have also
// called NotifyEnded() http://crbug.com/106641
DeliverEndOfStream();
- EXPECT_CALL(*renderer_, OnRenderEndOfStream())
- .WillOnce(Invoke(renderer_.get(), &AudioRendererBase::SignalEndOfStream));
EXPECT_CALL(host_, NotifyEnded());
EXPECT_CALL(host_, GetTime()).WillOnce(Return(base::TimeDelta()));
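
An aside on the fixture change above: the sink is wrapped in NiceMock so that sink calls the tests do not explicitly expect are allowed silently rather than producing "uninteresting mock function call" warnings. A minimal self-contained sketch of that gmock pattern follows; the interface and test names are illustrative and unrelated to the real media::AudioRendererSink, and the include paths assume a generic gtest/gmock setup.

#include "gmock/gmock.h"
#include "gtest/gtest.h"

class Sink {
 public:
  virtual ~Sink() {}
  virtual void Start() = 0;
  virtual void Stop() = 0;
};

class MockSink : public Sink {
 public:
  MOCK_METHOD0(Start, void());
  MOCK_METHOD0(Stop, void());
};

TEST(NiceMockExample, UninterestingCallsAreAllowed) {
  ::testing::NiceMock<MockSink> sink;
  // No EXPECT_CALL set up; with NiceMock these calls are simply allowed
  // instead of generating "uninteresting mock function call" warnings.
  sink.Start();
  sink.Stop();
}
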
diff --git a/media/filters/null_audio_renderer.cc b/media/filters/null_audio_renderer.cc
deleted file mode 100644
index f7ee27c..0000000
--- a/media/filters/null_audio_renderer.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <algorithm>
-#include <cmath>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/threading/platform_thread.h"
-#include "media/base/filter_host.h"
-#include "media/filters/null_audio_renderer.h"
-
-namespace media {
-
-// How "long" our buffer should be in terms of milliseconds. In OnInitialize
-// we calculate the size of one second of audio data and use this number to
-// allocate a buffer to pass to FillBuffer.
-static const size_t kBufferSizeInMilliseconds = 100;
-
-NullAudioRenderer::NullAudioRenderer()
- : AudioRendererBase(),
- bytes_per_millisecond_(0),
- buffer_size_(0),
- bytes_per_frame_(0),
- thread_("AudioThread") {
-}
-
-NullAudioRenderer::~NullAudioRenderer() {
- DCHECK(!thread_.IsRunning());
-}
-
-void NullAudioRenderer::SetVolume(float volume) {
- // Do nothing.
-}
-
-bool NullAudioRenderer::OnInitialize(int bits_per_channel,
- ChannelLayout channel_layout,
- int sample_rate) {
- // Calculate our bytes per millisecond value and allocate our buffer.
- int channels = ChannelLayoutToChannelCount(channel_layout);
- int bytes_per_channel = bits_per_channel / 8;
- bytes_per_frame_ = channels * bytes_per_channel;
-
- bytes_per_millisecond_ = (bytes_per_frame_ * sample_rate) /
- base::Time::kMillisecondsPerSecond;
-
- buffer_size_ =
- bytes_per_millisecond_ * kBufferSizeInMilliseconds;
-
- buffer_.reset(new uint8[buffer_size_]);
- DCHECK(buffer_.get());
-
- if (!thread_.Start())
- return false;
-
- thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
- &NullAudioRenderer::FillBufferTask, this));
- return true;
-}
-
-void NullAudioRenderer::OnStop() {
- thread_.Stop();
-}
-
-void NullAudioRenderer::FillBufferTask() {
- base::TimeDelta delay;
-
- // Only consume buffers when actually playing.
- if (GetPlaybackRate() > 0.0f) {
- size_t requested_frames = buffer_size_ / bytes_per_frame_;
- size_t frames = FillBuffer(
- buffer_.get(), requested_frames, base::TimeDelta());
- size_t bytes = frames * bytes_per_frame_;
-
- // Calculate our sleep duration, taking playback rate into consideration.
- delay = base::TimeDelta::FromMilliseconds(
- bytes / (bytes_per_millisecond_ * GetPlaybackRate()));
- } else {
- // If paused, sleep for 10 milliseconds before polling again.
- delay = base::TimeDelta::FromMilliseconds(10);
- }
-
- // Sleep for at least one millisecond so we don't spin the CPU.
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&NullAudioRenderer::FillBufferTask, this),
- std::max(delay, base::TimeDelta::FromMilliseconds(1)));
-}
-
-void NullAudioRenderer::OnRenderEndOfStream() {
- SignalEndOfStream();
-}
-
-} // namespace media
diff --git a/media/filters/null_audio_renderer.h b/media/filters/null_audio_renderer.h
deleted file mode 100644
index 03500ca..0000000
--- a/media/filters/null_audio_renderer.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
-#define MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
-
-// NullAudioRenderer effectively uses an extra thread to "throw away" the
-// audio data at a rate resembling normal playback speed. It's just like
-// decoding to /dev/null!
-//
-// NullAudioRenderer can also be used in situations where the client has no
-// audio device or we haven't written an audio implementation for a particular
-// platform yet.
-
-#include <deque>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/thread.h"
-#include "media/base/buffers.h"
-#include "media/base/filters.h"
-#include "media/filters/audio_renderer_base.h"
-
-namespace media {
-
-class MEDIA_EXPORT NullAudioRenderer : public AudioRendererBase {
- public:
- NullAudioRenderer();
- virtual ~NullAudioRenderer();
-
- // AudioRenderer implementation.
- virtual void SetVolume(float volume) OVERRIDE;
-
- protected:
- // AudioRendererBase implementation.
- virtual bool OnInitialize(int bits_per_channel,
- ChannelLayout channel_layout,
- int sample_rate) OVERRIDE;
- virtual void OnStop() OVERRIDE;
- virtual void OnRenderEndOfStream() OVERRIDE;
-
- private:
- // Audio thread task that periodically calls FillBuffer() to consume
- // audio data.
- void FillBufferTask();
-
- // A number to convert bytes written in FillBuffer to milliseconds based on
- // the audio format.
- size_t bytes_per_millisecond_;
-
- // A buffer passed to FillBuffer to advance playback.
- scoped_array<uint8> buffer_;
- size_t buffer_size_;
-
- size_t bytes_per_frame_;
-
- // Separate thread used to throw away data.
- base::Thread thread_;
-
- DISALLOW_COPY_AND_ASSIGN(NullAudioRenderer);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
diff --git a/media/filters/pipeline_integration_test_base.cc b/media/filters/pipeline_integration_test_base.cc
index 6fcead0..fb348f1 100644
--- a/media/filters/pipeline_integration_test_base.cc
+++ b/media/filters/pipeline_integration_test_base.cc
@@ -6,12 +6,13 @@
#include "base/bind.h"
#include "media/base/media_log.h"
+#include "media/audio/null_audio_sink.h"
+#include "media/filters/audio_renderer_base.h"
#include "media/filters/chunk_demuxer.h"
#include "media/filters/ffmpeg_audio_decoder.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/file_data_source.h"
-#include "media/filters/null_audio_renderer.h"
using ::testing::AnyNumber;
@@ -177,7 +178,7 @@ PipelineIntegrationTestBase::CreateFilterCollection(
base::Unretained(this)),
false);
collection->AddVideoRenderer(renderer_);
- collection->AddAudioRenderer(new NullAudioRenderer());
+ collection->AddAudioRenderer(new AudioRendererBase(new NullAudioSink()));
return collection.Pass();
}
diff --git a/media/media.gyp b/media/media.gyp
index 47d1337..7b5ca06 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -93,6 +93,8 @@
'audio/mac/audio_manager_mac.h',
'audio/mac/audio_output_mac.cc',
'audio/mac/audio_output_mac.h',
+ 'audio/null_audio_sink.cc',
+ 'audio/null_audio_sink.h',
'audio/pulse/pulse_output.cc',
'audio/pulse/pulse_output.h',
'audio/simple_sources.cc',
@@ -209,8 +211,6 @@
'filters/gpu_video_decoder.h',
'filters/in_memory_url_protocol.cc',
'filters/in_memory_url_protocol.h',
- 'filters/null_audio_renderer.cc',
- 'filters/null_audio_renderer.h',
'filters/video_frame_generator.cc',
'filters/video_frame_generator.h',
'filters/video_renderer_base.cc',
diff --git a/media/tools/player_wtl/movie.cc b/media/tools/player_wtl/movie.cc
index 781f377..110acc0 100644
--- a/media/tools/player_wtl/movie.cc
+++ b/media/tools/player_wtl/movie.cc
@@ -9,15 +9,16 @@
#include "base/threading/platform_thread.h"
#include "base/utf_string_conversions.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/null_audio_sink.h"
#include "media/base/filter_collection.h"
#include "media/base/media_log.h"
#include "media/base/message_loop_factory.h"
#include "media/base/pipeline.h"
+#include "media/filters/audio_renderer_base.h"
#include "media/filters/ffmpeg_audio_decoder.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/file_data_source.h"
-#include "media/filters/null_audio_renderer.h"
#include "media/filters/video_renderer_base.h"
namespace media {
@@ -83,7 +84,8 @@ bool Movie::Open(const wchar_t* url, VideoRendererBase* video_renderer) {
"VideoDecoderThread")));
// TODO(vrk): Re-enabled audio. (crbug.com/112159)
- collection->AddAudioRenderer(new media::NullAudioRenderer());
+ collection->AddAudioRenderer(
+ new media::AudioRendererBase(new media::NullAudioSink()));
collection->AddVideoRenderer(video_renderer);
// Create and start our pipeline.
diff --git a/media/tools/player_x11/player_x11.cc b/media/tools/player_x11/player_x11.cc
index ce2ff07..b22e696 100644
--- a/media/tools/player_x11/player_x11.cc
+++ b/media/tools/player_x11/player_x11.cc
@@ -16,6 +16,7 @@
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/null_audio_sink.h"
#include "media/base/filter_collection.h"
#include "media/base/media.h"
#include "media/base/media_log.h"
@@ -23,11 +24,11 @@
#include "media/base/message_loop_factory.h"
#include "media/base/pipeline.h"
#include "media/base/video_frame.h"
+#include "media/filters/audio_renderer_base.h"
#include "media/filters/ffmpeg_audio_decoder.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/file_data_source.h"
-#include "media/filters/null_audio_renderer.h"
#include "media/filters/video_renderer_base.h"
#include "media/tools/player_x11/data_source_logger.h"
#include "media/tools/player_x11/gl_video_renderer.h"
@@ -127,7 +128,8 @@ bool InitPipeline(MessageLoop* message_loop,
true);
collection->AddVideoRenderer(g_video_renderer);
- collection->AddAudioRenderer(new media::NullAudioRenderer());
+ collection->AddAudioRenderer(
+ new media::AudioRendererBase(new media::NullAudioSink()));
// Create the pipeline and start it.
*pipeline = new media::Pipeline(message_loop, new media::MediaLog());
diff --git a/webkit/media/webmediaplayer_impl.cc b/webkit/media/webmediaplayer_impl.cc
index b13c997..2e945fc 100644
--- a/webkit/media/webmediaplayer_impl.cc
+++ b/webkit/media/webmediaplayer_impl.cc
@@ -12,13 +12,14 @@
#include "base/command_line.h"
#include "base/message_loop_proxy.h"
#include "base/metrics/histogram.h"
+#include "media/audio/null_audio_sink.h"
#include "media/base/filter_collection.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
#include "media/base/video_frame.h"
-#include "media/filters/null_audio_renderer.h"
+#include "media/filters/audio_renderer_base.h"
#include "media/filters/video_renderer_base.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebVideoFrame.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebView.h"
@@ -152,7 +153,8 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
proxy_->set_frame_provider(video_renderer);
// Create default audio renderer.
- filter_collection_->AddAudioRenderer(new media::NullAudioRenderer());
+ filter_collection_->AddAudioRenderer(
+ new media::AudioRendererBase(new media::NullAudioSink()));
}
WebMediaPlayerImpl::~WebMediaPlayerImpl() {